Dataset schema (column, type, and min/max of value or string length):

column                     type            min    max
code                       string length   86     54.5k
code_codestyle             int64           0      371
style_context              string length   87     49.2k
style_context_codestyle    int64           0      349
label                      int64           0      1
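The rows that follow are instances of this schema; judging by the samples below, label is 1 when the two codestyle IDs agree and 0 when they differ. A minimal sketch of loading and inspecting such a dataset with the Hugging Face `datasets` library follows; the dataset ID is a placeholder, since the dump does not name its source.

from datasets import load_dataset

# "user/code-style-pairs" is a hypothetical ID; the dump does not say where it lives.
ds = load_dataset("user/code-style-pairs", split="train")
print(ds.features)  # code, code_codestyle, style_context, style_context_codestyle, label

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])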
def solution(limit: int = 50_000_000) -> int:
    """Project Euler 87: count the numbers below `limit` that can be written
    as a prime square plus a prime cube plus a prime fourth power."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 82
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def _UpperCAmelCase ( snake_case , snake_case ): """simple docstring""" _lowerCAmelCase = XCLIPTextConfig() # derive patch size from model name _lowerCAmelCase = model_name.find("""patch""" ) _lowerCAmelCase = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] ) _lowerCAmelCase = XCLIPVisionConfig(patch_size=snake_case , num_frames=snake_case ) if "large" in model_name: _lowerCAmelCase = 7_68 _lowerCAmelCase = 30_72 _lowerCAmelCase = 12 _lowerCAmelCase = 10_24 _lowerCAmelCase = 40_96 _lowerCAmelCase = 16 _lowerCAmelCase = 24 _lowerCAmelCase = 7_68 _lowerCAmelCase = 30_72 if model_name == "xclip-large-patch14-16-frames": _lowerCAmelCase = 3_36 _lowerCAmelCase = XCLIPConfig.from_text_vision_configs(snake_case , snake_case ) if "large" in model_name: _lowerCAmelCase = 7_68 return config def _UpperCAmelCase ( snake_case ): """simple docstring""" if name == "token_embedding.weight": _lowerCAmelCase = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" ) if name == "positional_embedding": _lowerCAmelCase = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" ) if "ln_1" in name: _lowerCAmelCase = name.replace("""ln_1""" , """layer_norm1""" ) if "ln_2" in name: _lowerCAmelCase = name.replace("""ln_2""" , """layer_norm2""" ) if "c_fc" in name: _lowerCAmelCase = name.replace("""c_fc""" , """fc1""" ) if "c_proj" in name: _lowerCAmelCase = name.replace("""c_proj""" , """fc2""" ) if name.startswith("""transformer.resblocks""" ): _lowerCAmelCase = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" ) if "attn.out_proj" in name and "message" not in name: _lowerCAmelCase = name.replace("""attn.out_proj""" , """self_attn.out_proj""" ) if "ln_final" in name: _lowerCAmelCase = name.replace("""ln_final""" , """text_model.final_layer_norm""" ) # visual encoder if name == "visual.class_embedding": _lowerCAmelCase = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" ) if name == "visual.positional_embedding": _lowerCAmelCase = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" ) if name.startswith("""visual.transformer.resblocks""" ): _lowerCAmelCase = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" ) if "visual.conv1" in name: _lowerCAmelCase = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" ) if "visual.ln_pre" in name: _lowerCAmelCase = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" ) if "visual.ln_post" in name: _lowerCAmelCase = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" ) if "visual.proj" in name: _lowerCAmelCase = name.replace("""visual.proj""" , """visual_projection.weight""" ) if "text_projection" in name: _lowerCAmelCase = name.replace("""text_projection""" , """text_projection.weight""" ) # things on top if "prompts_visual_proj" in name: _lowerCAmelCase = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" ) if "prompts_visual_ln" in name: _lowerCAmelCase = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" ) # mit if name == "mit.positional_embedding": 
_lowerCAmelCase = name.replace("""positional""" , """position""" ) if name.startswith("""mit.resblocks""" ): _lowerCAmelCase = name.replace("""mit.resblocks""" , """mit.encoder.layers""" ) # prompts generator if name.startswith("""prompts_generator.norm""" ): _lowerCAmelCase = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" ) return name def _UpperCAmelCase ( snake_case , snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): _lowerCAmelCase = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: _lowerCAmelCase = key.split(""".""" ) if key.startswith("""visual""" ): _lowerCAmelCase = key_split[3] _lowerCAmelCase = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: _lowerCAmelCase = val[ :dim, : ] _lowerCAmelCase = val[ dim : dim * 2, : ] _lowerCAmelCase = val[ -dim:, : ] else: _lowerCAmelCase = val[ :dim ] _lowerCAmelCase = val[ dim : dim * 2 ] _lowerCAmelCase = val[ -dim: ] else: if "weight" in key: _lowerCAmelCase = val[ :dim, : ] _lowerCAmelCase = val[ dim : dim * 2, : ] _lowerCAmelCase = val[ -dim:, : ] else: _lowerCAmelCase = val[:dim] _lowerCAmelCase = val[ dim : dim * 2 ] _lowerCAmelCase = val[-dim:] elif key.startswith("""mit""" ): _lowerCAmelCase = key_split[2] _lowerCAmelCase = config.vision_config.mit_hidden_size if "weight" in key: _lowerCAmelCase = val[:dim, :] _lowerCAmelCase = val[dim : dim * 2, :] _lowerCAmelCase = val[-dim:, :] else: _lowerCAmelCase = val[:dim] _lowerCAmelCase = val[dim : dim * 2] _lowerCAmelCase = val[-dim:] else: _lowerCAmelCase = key_split[2] _lowerCAmelCase = config.text_config.hidden_size if "weight" in key: _lowerCAmelCase = val[:dim, :] _lowerCAmelCase = val[ dim : dim * 2, : ] _lowerCAmelCase = val[-dim:, :] else: _lowerCAmelCase = val[:dim] _lowerCAmelCase = val[ dim : dim * 2 ] _lowerCAmelCase = val[-dim:] else: _lowerCAmelCase = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: _lowerCAmelCase = val.T _lowerCAmelCase = val return orig_state_dict def _UpperCAmelCase ( snake_case ): """simple docstring""" if num_frames == 8: _lowerCAmelCase = """eating_spaghetti_8_frames.npy""" elif num_frames == 16: _lowerCAmelCase = """eating_spaghetti.npy""" elif num_frames == 32: _lowerCAmelCase = """eating_spaghetti_32_frames.npy""" _lowerCAmelCase = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename=snake_case , repo_type="""dataset""" , ) _lowerCAmelCase = np.load(snake_case ) return list(snake_case ) def _UpperCAmelCase ( snake_case , snake_case=None , snake_case=False ): """simple docstring""" _lowerCAmelCase = { # fully supervised kinetics-400 checkpoints """xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""", """xclip-base-patch32-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth""" ), """xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""", """xclip-base-patch16-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth""" ), """xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""", """xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""", # fully supervised 
kinetics-600 checkpoints """xclip-base-patch16-kinetics-600""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth""" ), """xclip-base-patch16-kinetics-600-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth""" ), """xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""", # few shot """xclip-base-patch16-hmdb-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth""" ), """xclip-base-patch16-hmdb-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth""" ), """xclip-base-patch16-hmdb-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth""" ), """xclip-base-patch16-hmdb-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth""" ), """xclip-base-patch16-ucf-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth""" ), """xclip-base-patch16-ucf-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth""" ), """xclip-base-patch16-ucf-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth""" ), """xclip-base-patch16-ucf-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth""" ), # zero shot """xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""", } _lowerCAmelCase = model_to_url[model_name] _lowerCAmelCase = 8 if "16-frames" in model_name: _lowerCAmelCase = 16 elif "shot" in model_name: _lowerCAmelCase = 32 _lowerCAmelCase = get_xclip_config(snake_case , snake_case ) _lowerCAmelCase = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: _lowerCAmelCase = """pytorch_model.bin""" gdown.cached_download(snake_case , snake_case , quiet=snake_case ) _lowerCAmelCase = torch.load(snake_case , map_location="""cpu""" )["""model"""] else: _lowerCAmelCase = torch.hub.load_state_dict_from_url(snake_case )["""model"""] _lowerCAmelCase = convert_state_dict(snake_case , snake_case ) _lowerCAmelCase = XCLIPModel(snake_case ) _lowerCAmelCase , _lowerCAmelCase = model.load_state_dict(snake_case , strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() _lowerCAmelCase = 3_36 if model_name == """xclip-large-patch14-16-frames""" else 2_24 _lowerCAmelCase = VideoMAEImageProcessor(size=snake_case ) _lowerCAmelCase = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" ) _lowerCAmelCase = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" ) _lowerCAmelCase = XCLIPProcessor(image_processor=snake_case , tokenizer=snake_case ) _lowerCAmelCase = prepare_video(snake_case ) _lowerCAmelCase = processor( text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=snake_case , return_tensors="""pt""" , padding=snake_case ) print("""Shape of pixel values:""" , inputs.pixel_values.shape ) with torch.no_grad(): _lowerCAmelCase = model(**snake_case ) # Verify outputs _lowerCAmelCase = outputs.logits_per_video _lowerCAmelCase = logits_per_video.softmax(dim=1 ) print("""Probs:""" , snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": _lowerCAmelCase = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] ) elif 
model_name == "xclip-base-patch32-16-frames": _lowerCAmelCase = torch.tensor([[7.09_99E-04, 9.98_83E-01, 4.55_80E-04]] ) elif model_name == "xclip-base-patch16": _lowerCAmelCase = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] ) elif model_name == "xclip-base-patch16-16-frames": _lowerCAmelCase = torch.tensor([[7.69_37E-04, 9.97_28E-01, 1.94_73E-03]] ) elif model_name == "xclip-large-patch14": _lowerCAmelCase = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] ) elif model_name == "xclip-large-patch14-16-frames": _lowerCAmelCase = torch.tensor([[3.38_77E-04, 9.99_37E-01, 2.88_88E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": _lowerCAmelCase = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": _lowerCAmelCase = torch.tensor([[3.85_54E-04, 9.99_29E-01, 3.27_54E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": _lowerCAmelCase = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": _lowerCAmelCase = torch.tensor([[7.18_90E-06, 9.99_94E-01, 5.65_59E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": _lowerCAmelCase = torch.tensor([[1.03_20E-05, 9.99_93E-01, 6.24_35E-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": _lowerCAmelCase = torch.tensor([[4.13_77E-06, 9.99_90E-01, 9.83_86E-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": _lowerCAmelCase = torch.tensor([[4.13_47E-05, 9.99_62E-01, 3.34_11E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": _lowerCAmelCase = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": _lowerCAmelCase = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": _lowerCAmelCase = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": _lowerCAmelCase = torch.tensor([[9.82_19E-04, 9.95_93E-01, 3.08_63E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": _lowerCAmelCase = torch.tensor([[3.50_82E-04, 9.97_85E-01, 1.79_66E-03]] ) else: raise ValueError(F'Model name {model_name} not supported' ) assert torch.allclose(snake_case , snake_case , atol=1E-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case ) if push_to_hub: print("""Pushing model, processor and slow tokenizer files to the hub...""" ) model.push_to_hub(snake_case , organization="""nielsr""" ) processor.push_to_hub(snake_case , organization="""nielsr""" ) slow_tokenizer.push_to_hub(snake_case , organization="""nielsr""" ) if __name__ == "__main__": A__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) A__ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
style_context_codestyle: 82
label: 1
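The code sample in the row above is a sieve-based solution to Project Euler 87: count the numbers below 50,000,000 expressible as p**2 + q**3 + r**4 with p, q, r prime. For small limits the count can be cross-checked by brute force; the helper below is illustrative only, not part of the dataset row.

def brute_force_count(limit: int) -> int:
    # Enumerate p**2 + q**3 + r**4 directly; primes up to sqrt(limit)
    # cover all three terms, since the square grows slowest.
    primes = [
        p
        for p in range(2, int(limit**0.5) + 1)
        if all(p % d for d in range(2, int(p**0.5) + 1))
    ]
    hits = {
        p * p + q**3 + r**4
        for p in primes
        for q in primes
        for r in primes
        if p * p + q**3 + r**4 < limit
    }
    return len(hits)


print(brute_force_count(1_000))  # should agree with solution(1_000)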
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class UpperCamelCase_ : def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=33 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=512 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Dict: _snake_case = parent _snake_case = batch_size _snake_case = seq_length _snake_case = is_training _snake_case = use_input_mask _snake_case = use_token_type_ids _snake_case = use_labels _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = type_vocab_size _snake_case = type_sequence_label_size _snake_case = initializer_range _snake_case = num_labels _snake_case = num_choices _snake_case = scope def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case = None if self.use_input_mask: _snake_case = random_attention_mask([self.batch_size, self.seq_length] ) _snake_case = None _snake_case = None _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _snake_case = ids_tensor([self.batch_size] , self.num_choices ) _snake_case = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self ) -> Optional[Any]: return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str: _snake_case = EsmModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) 
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple: _snake_case = EsmForMaskedLM(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict: _snake_case = self.num_labels _snake_case = EsmForTokenClassification(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self ) -> Optional[int]: _snake_case = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) = config_and_inputs _snake_case = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = False lowerCAmelCase_ = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) lowerCAmelCase_ = () lowerCAmelCase_ = ( { '''feature-extraction''': EsmModel, '''fill-mask''': EsmForMaskedLM, '''text-classification''': EsmForSequenceClassification, '''token-classification''': EsmForTokenClassification, '''zero-shot''': EsmForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase_ = True def lowerCAmelCase ( self ) -> str: _snake_case = EsmModelTester(self ) _snake_case = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 ) def lowerCAmelCase ( self ) -> str: self.config_tester.run_common_tests() def lowerCAmelCase ( self ) -> Dict: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Any: _snake_case = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _snake_case = type self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Dict: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ ) @slow def lowerCAmelCase ( self ) -> Tuple: for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = EsmModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> str: _snake_case = self.model_tester.prepare_config_and_inputs()[0] _snake_case = EsmEmbeddings(config=lowerCAmelCase_ ) _snake_case = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) _snake_case = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) _snake_case = create_position_ids_from_input_ids(lowerCAmelCase_ , model.padding_idx ) 
self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(lowerCAmelCase_ , lowerCAmelCase_ ) ) ) def lowerCAmelCase ( self ) -> Dict: _snake_case = self.model_tester.prepare_config_and_inputs()[0] _snake_case = EsmEmbeddings(config=lowerCAmelCase_ ) _snake_case = torch.empty(2 , 4 , 30 ) _snake_case = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] _snake_case = torch.as_tensor([expected_single_positions, expected_single_positions] ) _snake_case = embeddings.create_position_ids_from_inputs_embeds(lowerCAmelCase_ ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(lowerCAmelCase_ , lowerCAmelCase_ ) ) ) @unittest.skip('Esm does not support embedding resizing' ) def lowerCAmelCase ( self ) -> Optional[int]: pass @unittest.skip('Esm does not support embedding resizing' ) def lowerCAmelCase ( self ) -> List[Any]: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def lowerCAmelCase ( self ) -> List[str]: pass @require_torch class UpperCamelCase_ ( _lowerCamelCase ): @slow def lowerCAmelCase ( self ) -> List[Any]: with torch.no_grad(): _snake_case = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() _snake_case = torch.tensor([[0, 1, 2, 3, 4, 5]] ) _snake_case = model(lowerCAmelCase_ )[0] _snake_case = 33 _snake_case = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , lowerCAmelCase_ ) _snake_case = torch.tensor( [[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) ) @slow def lowerCAmelCase ( self ) -> Optional[Any]: with torch.no_grad(): _snake_case = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() _snake_case = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) _snake_case = model(lowerCAmelCase_ )[0] # compare the actual values for a slice. _snake_case = torch.tensor( [[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) )
code_codestyle: 295
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase_ = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ UpperCAmelCase_ = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. """ UpperCAmelCase_ = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 
'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase_ ( datasets.Metric ): def lowerCAmelCase ( self ) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ), } ) , ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=lowerCAmelCase_ , hypotheses=lowerCAmelCase_ , min_len=lowerCAmelCase_ , max_len=lowerCAmelCase_ ) }
style_context_codestyle: 295
label: 1
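The style context in the row above is a thin wrapper around NLTK's GLEU implementation: its final method simply forwards to `gleu_score.corpus_gleu`, which computes the recall/precision/minimum score its docstring describes. A short sketch of that underlying call, with invented token lists:

from nltk.translate import gleu_score

hypotheses = [["the", "cat", "sat", "down"]]
list_of_references = [[["the", "cat", "lay", "down"]]]  # one reference per hypothesis

score = gleu_score.corpus_gleu(
    list_of_references=list_of_references,
    hypotheses=hypotheses,
    min_len=1,  # smallest n-gram order, matching the metric's default
    max_len=4,  # largest n-gram order
)
print(round(score, 2))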
"""Measure a single qubit on a simulated backend and report the counts."""
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
code_codestyle: 34
'''simple docstring''' import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) A =logging.getLogger(__name__) A ='Hello world! cécé herlolip' A =namedtuple( 'BertAbsConfig', [ 'temp_dir', 'large', 'use_bert_emb', 'finetune_bert', 'encoder', 'share_emb', 'max_pos', 'enc_layers', 'enc_hidden_size', 'enc_heads', 'enc_ff_size', 'enc_dropout', 'dec_layers', 'dec_hidden_size', 'dec_heads', 'dec_ff_size', 'dec_dropout', ], ) def snake_case_ (_a : List[Any] , _a : Any ): UpperCAmelCase = BertAbsConfig( temp_dir='''.''' , finetune_bert=_a , large=_a , share_emb=_a , use_bert_emb=_a , encoder='''bert''' , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , ) UpperCAmelCase = torch.load(_a , lambda _a , _a : storage ) UpperCAmelCase = AbsSummarizer(_a , torch.device('''cpu''' ) , _a ) original.eval() UpperCAmelCase = BertAbsSummarizer(_a , torch.device('''cpu''' ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info('''convert the model''' ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info('''Make sure that the models\' outputs are identical''' ) UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) # prepare the model inputs UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' ) encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(_a )) ) UpperCAmelCase = torch.tensor(_a ).unsqueeze(0 ) UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' ) decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(_a )) ) UpperCAmelCase = torch.tensor(_a ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass UpperCAmelCase = encoder_input_ids UpperCAmelCase = decoder_input_ids UpperCAmelCase = UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = UpperCAmelCase = None UpperCAmelCase = UpperCAmelCase = None UpperCAmelCase = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. 
# We make sure that the outputs of the full stack are identical UpperCAmelCase = original(_a , _a , _a , _a , _a , _a , _a )[0] UpperCAmelCase = original.generator(_a ) UpperCAmelCase = new_model( _a , _a , _a , _a , _a )[0] UpperCAmelCase = new_model.generator(_a ) UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(_a ) ) UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(_a ) ) UpperCAmelCase = torch.allclose(_a , _a , atol=1E-3 ) if are_identical: logging.info('''all weights are equal up to 1e-3''' ) else: raise ValueError('''the weights are different. The new model is likely different from the original one.''' ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info('''saving the model\'s state dictionary''' ) torch.save( new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' ) if __name__ == "__main__": A =argparse.ArgumentParser() parser.add_argument( '--bertabs_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.', ) A =parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
style_context_codestyle: 34
label: 1
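The qiskit sample above targets the pre-1.0 API: `qiskit.Aer` and `qiskit.execute` were removed in Qiskit 1.0. A rough equivalent for current releases, assuming the separate `qiskit-aer` package is installed:

from qiskit import QuantumCircuit
from qiskit_aer import AerSimulator

circuit = QuantumCircuit(1, 1)
circuit.measure([0], [0])
job = AerSimulator().run(circuit, shots=1000)
print(job.result().get_counts(circuit))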
from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
code_codestyle: 70
import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) _lowerCAmelCase : Tuple = logging.getLogger(__name__) def __snake_case ( ) -> Tuple: A_ : List[str] = argparse.ArgumentParser( description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." ) parser.add_argument("--file_path" , type=_lowerCAmelCase , default="data/dump.txt" , help="The path to the data." ) parser.add_argument("--tokenizer_type" , type=_lowerCAmelCase , default="bert" , choices=["bert", "roberta", "gpt2"] ) parser.add_argument("--tokenizer_name" , type=_lowerCAmelCase , default="bert-base-uncased" , help="The tokenizer to use." ) parser.add_argument("--dump_file" , type=_lowerCAmelCase , default="data/dump" , help="The dump file prefix." ) A_ : int = parser.parse_args() logger.info(f"Loading Tokenizer ({args.tokenizer_name})" ) if args.tokenizer_type == "bert": A_ : int = BertTokenizer.from_pretrained(args.tokenizer_name ) A_ : Union[str, Any] = tokenizer.special_tokens_map["cls_token"] # `[CLS]` A_ : Any = tokenizer.special_tokens_map["sep_token"] # `[SEP]` elif args.tokenizer_type == "roberta": A_ : Dict = RobertaTokenizer.from_pretrained(args.tokenizer_name ) A_ : List[str] = tokenizer.special_tokens_map["cls_token"] # `<s>` A_ : Any = tokenizer.special_tokens_map["sep_token"] # `</s>` elif args.tokenizer_type == "gpt2": A_ : Union[str, Any] = GPTaTokenizer.from_pretrained(args.tokenizer_name ) A_ : Any = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>` A_ : Union[str, Any] = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>` logger.info(f"Loading text from {args.file_path}" ) with open(args.file_path , "r" , encoding="utf8" ) as fp: A_ : Union[str, Any] = fp.readlines() logger.info("Start encoding" ) logger.info(f"{len(_lowerCAmelCase )} examples to process." ) A_ : List[Any] = [] A_ : Tuple = 0 A_ : Union[str, Any] = 10000 A_ : Optional[int] = time.time() for text in data: A_ : Any = f"{bos} {text.strip()} {sep}" A_ : List[Any] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) rslt.append(_lowerCAmelCase ) iter += 1 if iter % interval == 0: A_ : str = time.time() logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" ) A_ : Union[str, Any] = time.time() logger.info("Finished binarization" ) logger.info(f"{len(_lowerCAmelCase )} examples processed." ) A_ : int = f"{args.dump_file}.{args.tokenizer_name}.pickle" A_ : List[Any] = tokenizer.vocab_size if vocab_size < (1 << 16): A_ : Union[str, Any] = [np.uintaa(_lowerCAmelCase ) for d in rslt] else: A_ : List[str] = [np.intaa(_lowerCAmelCase ) for d in rslt] random.shuffle(rslt_ ) logger.info(f"Dump to {dp_file}" ) with open(_lowerCAmelCase , "wb" ) as handle: pickle.dump(rslt_ , _lowerCAmelCase , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
style_context_codestyle: 70
label: 1
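One detail worth noting in the preprocessing script above: the `vocab_size < (1 << 16)` branch stores token ids as 16-bit unsigned integers rather than wider ones, halving the size of the pickled dump (the mangled `np.uintaa`/`np.intaa` names are almost certainly `np.uint16`/`np.int32`). A standalone sketch of that dtype choice, with illustrative values:

import numpy as np

vocab_size = 30_522                 # e.g. bert-base-uncased
token_ids = [101, 7592, 2088, 102]  # invented ids for illustration

dtype = np.uint16 if vocab_size < (1 << 16) else np.int32
arr = np.array(token_ids, dtype=dtype)
print(arr.dtype, arr.nbytes)        # uint16 -> 2 bytes per id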
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-",    "B": "-...",  "C": "-.-.",  "D": "-..",   "E": ".",
    "F": "..-.",  "G": "--.",   "H": "....",  "I": "..",    "J": ".---",
    "K": "-.-",   "L": ".-..",  "M": "--",    "N": "-.",    "O": "---",
    "P": ".--.",  "Q": "--.-",  "R": ".-.",   "S": "...",   "T": "-",
    "U": "..-",   "V": "...-",  "W": ".--",   "X": "-..-",  "Y": "-.--",
    "Z": "--..",
    "1": ".----", "2": "..---", "3": "...--", "4": "....-", "5": ".....",
    "6": "-....", "7": "--...", "8": "---..", "9": "----.", "0": "-----",
    "&": ".-...", "@": ".--.-.", ":": "---...", ",": "--..--", ".": ".-.-.-",
    "'": ".----.", '"': ".-..-.", "?": "..--..", "/": "-..-.", "=": "-...-",
    "+": ".-.-.", "-": "-....-", "(": "-.--.", ")": "-.--.-",
    "!": "-.-.--",  # Exclamation mark is not in ITU-R recommendation
    " ": "/",
}
# fmt: on

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Translate a message into Morse code, one space between letters."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Translate space-separated Morse code back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
code_codestyle: 154
import argparse import torch from transformers import ( UniSpeechSatConfig, UniSpeechSatForAudioFrameClassification, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, WavaVecaFeatureExtractor, logging, ) logging.set_verbosity_info() __A : Tuple = logging.get_logger(__name__) def __UpperCamelCase ( _A : str , _A : str , _A : str ) ->int: """simple docstring""" lowerCamelCase_ =UniSpeechSatForSequenceClassification.from_pretrained(_A , config=_A ) lowerCamelCase_ =downstream_dict["""projector.weight"""] lowerCamelCase_ =downstream_dict["""projector.bias"""] lowerCamelCase_ =downstream_dict["""model.post_net.linear.weight"""] lowerCamelCase_ =downstream_dict["""model.post_net.linear.bias"""] return model def __UpperCamelCase ( _A : Optional[int] , _A : str , _A : Any ) ->Optional[int]: """simple docstring""" lowerCamelCase_ =UniSpeechSatForAudioFrameClassification.from_pretrained(_A , config=_A ) lowerCamelCase_ =downstream_dict["""model.linear.weight"""] lowerCamelCase_ =downstream_dict["""model.linear.bias"""] return model def __UpperCamelCase ( _A : Optional[Any] , _A : Optional[Any] , _A : Optional[Any] ) ->List[Any]: """simple docstring""" lowerCamelCase_ =UniSpeechSatForXVector.from_pretrained(_A , config=_A ) lowerCamelCase_ =downstream_dict["""connector.weight"""] lowerCamelCase_ =downstream_dict["""connector.bias"""] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): lowerCamelCase_ =downstream_dict[ f'model.framelevel_feature_extractor.module.{i}.kernel.weight' ] lowerCamelCase_ =downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias'] lowerCamelCase_ =downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""] lowerCamelCase_ =downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""] lowerCamelCase_ =downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""] lowerCamelCase_ =downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""] lowerCamelCase_ =downstream_dict["""objective.W"""] return model @torch.no_grad() def __UpperCamelCase ( _A : Any , _A : Optional[Any] , _A : Union[str, Any] , _A : str ) ->Union[str, Any]: """simple docstring""" lowerCamelCase_ =torch.load(_A , map_location="""cpu""" ) lowerCamelCase_ =checkpoint["""Downstream"""] lowerCamelCase_ =UniSpeechSatConfig.from_pretrained(_A ) lowerCamelCase_ =WavaVecaFeatureExtractor.from_pretrained( _A , return_attention_mask=_A , do_normalize=_A ) lowerCamelCase_ =hf_config.architectures[0] if arch.endswith("""ForSequenceClassification""" ): lowerCamelCase_ =convert_classification(_A , _A , _A ) elif arch.endswith("""ForAudioFrameClassification""" ): lowerCamelCase_ =convert_diarization(_A , _A , _A ) elif arch.endswith("""ForXVector""" ): lowerCamelCase_ =convert_xvector(_A , _A , _A ) else: raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}' ) if hf_config.use_weighted_layer_sum: lowerCamelCase_ =checkpoint["""Featurizer"""]["""weights"""] hf_feature_extractor.save_pretrained(_A ) hf_model.save_pretrained(_A ) if __name__ == "__main__": __A : Optional[int] = argparse.ArgumentParser() parser.add_argument( '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' 
) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') __A : int = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
style_context_codestyle: 154
label: 1
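A quick round trip through the Morse helpers from the row above (using the `encrypt`/`decrypt` names from the cleaned listing):

message = "SOS"
encoded = encrypt(message)   # '... --- ...'
decoded = decrypt(encoded)
assert decoded == message
print(encoded, "->", decoded)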
from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """Return the shortest distance and path between two cells of a binary
    grid (1 = passable, 0 = blocked), with a uniform edge cost of 1."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 362
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class _SCREAMING_SNAKE_CASE ( _a , _a , _a , unittest.TestCase ): snake_case__ : Dict = StableDiffusionControlNetImgaImgPipeline snake_case__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} snake_case__ : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS snake_case__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} ) snake_case__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS def _A ( self : List[str] ): torch.manual_seed(0 ) UpperCamelCase :str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) UpperCamelCase :str = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) UpperCamelCase :List[str] = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , ) torch.manual_seed(0 ) UpperCamelCase :Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) UpperCamelCase :Any = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) UpperCamelCase :Union[str, Any] = CLIPTextModel(__lowerCamelCase ) UpperCamelCase :Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) UpperCamelCase :Any = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _A ( self : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=0 ): if str(__lowerCamelCase ).startswith("""mps""" ): UpperCamelCase :Optional[int] = torch.manual_seed(__lowerCamelCase ) else: UpperCamelCase :List[Any] = torch.Generator(device=__lowerCamelCase 
).manual_seed(__lowerCamelCase ) UpperCamelCase :Optional[Any] = 2 UpperCamelCase :Optional[int] = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__lowerCamelCase , device=torch.device(__lowerCamelCase ) , ) UpperCamelCase :Tuple = floats_tensor(control_image.shape , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) UpperCamelCase :str = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCamelCase :Optional[Any] = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("""RGB""" ).resize((64, 64) ) UpperCamelCase :str = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def _A ( self : Dict ): return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def _A ( self : Union[str, Any] ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def _A ( self : Optional[Any] ): self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) class _SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ): snake_case__ : Optional[Any] = StableDiffusionControlNetImgaImgPipeline snake_case__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} snake_case__ : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS snake_case__ : int = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def _A ( self : List[Any] ): torch.manual_seed(0 ) UpperCamelCase :Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(__lowerCamelCase : Union[str, Any] ): if isinstance(__lowerCamelCase , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) UpperCamelCase :Union[str, Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__lowerCamelCase ) torch.manual_seed(0 ) UpperCamelCase :Tuple = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__lowerCamelCase ) torch.manual_seed(0 ) UpperCamelCase :str = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , ) torch.manual_seed(0 ) UpperCamelCase :Any = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) UpperCamelCase :Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , 
num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) UpperCamelCase :List[Any] = CLIPTextModel(__lowerCamelCase ) UpperCamelCase :Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) UpperCamelCase :Optional[Any] = MultiControlNetModel([controlneta, controlneta] ) UpperCamelCase :int = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _A ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any]=0 ): if str(__lowerCamelCase ).startswith("""mps""" ): UpperCamelCase :Dict = torch.manual_seed(__lowerCamelCase ) else: UpperCamelCase :Tuple = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) UpperCamelCase :Any = 2 UpperCamelCase :List[str] = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__lowerCamelCase , device=torch.device(__lowerCamelCase ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__lowerCamelCase , device=torch.device(__lowerCamelCase ) , ), ] UpperCamelCase :int = floats_tensor(control_image[0].shape , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) UpperCamelCase :List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCamelCase :Union[str, Any] = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("""RGB""" ).resize((64, 64) ) UpperCamelCase :Optional[int] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def _A ( self : List[str] ): UpperCamelCase :Union[str, Any] = self.get_dummy_components() UpperCamelCase :List[str] = self.pipeline_class(**__lowerCamelCase ) pipe.to(__lowerCamelCase ) UpperCamelCase :Optional[Any] = 10.0 UpperCamelCase :str = 4 UpperCamelCase :Optional[int] = self.get_dummy_inputs(__lowerCamelCase ) UpperCamelCase :str = steps UpperCamelCase :Tuple = scale UpperCamelCase :List[str] = pipe(**__lowerCamelCase )[0] UpperCamelCase :Optional[int] = self.get_dummy_inputs(__lowerCamelCase ) UpperCamelCase :List[Any] = steps UpperCamelCase :str = scale UpperCamelCase :int = pipe(**__lowerCamelCase , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] UpperCamelCase :List[str] = self.get_dummy_inputs(__lowerCamelCase ) UpperCamelCase :Optional[Any] = steps UpperCamelCase :str = scale UpperCamelCase :Any = pipe(**__lowerCamelCase , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] UpperCamelCase :Tuple = self.get_dummy_inputs(__lowerCamelCase ) UpperCamelCase :Union[str, Any] = steps UpperCamelCase :str = scale UpperCamelCase :Optional[int] = pipe(**__lowerCamelCase , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 def _A ( self : Any ): return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` 
installed""" , ) def _A ( self : Union[str, Any] ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def _A ( self : Dict ): self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) def _A ( self : Any ): UpperCamelCase :List[str] = self.get_dummy_components() UpperCamelCase :List[str] = self.pipeline_class(**__lowerCamelCase ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(__lowerCamelCase ) except NotImplementedError: pass @slow @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def _A ( self : int ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _A ( self : List[str] ): UpperCamelCase :Tuple = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" ) UpperCamelCase :List[Any] = StableDiffusionControlNetImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , safety_checker=__lowerCamelCase , controlnet=__lowerCamelCase ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__lowerCamelCase ) UpperCamelCase :List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 ) UpperCamelCase :Optional[int] = """evil space-punk bird""" UpperCamelCase :List[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) ) UpperCamelCase :List[str] = load_image( """https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) ) UpperCamelCase :str = pipe( __lowerCamelCase , __lowerCamelCase , control_image=__lowerCamelCase , generator=__lowerCamelCase , output_type="""np""" , num_inference_steps=50 , strength=0.6 , ) UpperCamelCase :int = output.images[0] assert image.shape == (512, 512, 3) UpperCamelCase :Optional[int] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" ) assert np.abs(expected_image - image ).max() < 9E-2
import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) lowercase : Dict = """bert-base-cased""" lowercase : int = """fp16""" lowercase : Optional[int] = """bf16""" lowercase : str = [FPaa, BFaa] @require_fsdp @require_cuda class __snake_case ( lowerCAmelCase ): def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' super().setUp() lowercase : str = dict( ACCELERATE_USE_FSDP="""true""" ,MASTER_ADDR="""localhost""" ,MASTER_PORT="""10999""" ,RANK="""0""" ,LOCAL_RANK="""0""" ,WORLD_SIZE="""1""" ,) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(snake_case ): lowercase : Tuple = self.dist_env.copy() lowercase : Dict = f"{i + 1}" lowercase : Optional[int] = strategy with mockenv_context(**snake_case ): lowercase : Any = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy ,ShardingStrategy(i + 1 ) ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(snake_case ): lowercase : Dict = self.dist_env.copy() lowercase : List[str] = prefetch_policy with mockenv_context(**snake_case ): lowercase : Tuple = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch ,BackwardPrefetch(i + 1 ) ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(snake_case ): lowercase : int = self.dist_env.copy() lowercase : List[str] = state_dict_type with mockenv_context(**snake_case ): lowercase : Optional[Any] = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type ,StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Optional[Any] = AutoModel.from_pretrained(snake_case ) for policy in FSDP_AUTO_WRAP_POLICY: lowercase : str = self.dist_env.copy() lowercase : Optional[Any] = policy if policy == "TRANSFORMER_BASED_WRAP": lowercase : List[Any] = """BertLayer""" elif policy == "SIZE_BASED_WRAP": lowercase : List[str] = """2000""" with mockenv_context(**snake_case ): lowercase : Optional[Any] = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(snake_case ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) lowercase : Union[str, Any] = self.dist_env.copy() lowercase : Optional[int] = """TRANSFORMER_BASED_WRAP""" lowercase : Optional[int] = 
"""T5Layer""" with mockenv_context(**snake_case ): lowercase : str = FullyShardedDataParallelPlugin() with self.assertRaises(snake_case ) as cm: fsdp_plugin.set_auto_wrap_policy(snake_case ) self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) ) lowercase : List[str] = self.dist_env.copy() lowercase : Tuple = """SIZE_BASED_WRAP""" lowercase : List[Any] = """0""" with mockenv_context(**snake_case ): lowercase : Any = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(snake_case ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: lowercase : str = self.dist_env.copy() lowercase : List[Any] = mp_dtype with mockenv_context(**snake_case ): lowercase : Dict = Accelerator() if mp_dtype == "fp16": lowercase : str = torch.floataa elif mp_dtype == "bf16": lowercase : Optional[int] = torch.bfloataa lowercase : Optional[Any] = MixedPrecision(param_dtype=snake_case ,reduce_dtype=snake_case ,buffer_dtype=snake_case ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy ,snake_case ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler ,snake_case ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: lowercase : Dict = self.dist_env.copy() lowercase : Optional[int] = str(snake_case ).lower() with mockenv_context(**snake_case ): lowercase : Optional[Any] = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload ,CPUOffload(offload_params=snake_case ) ) @require_fsdp @require_multi_gpu @slow class __snake_case ( lowerCAmelCase ): def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' super().setUp() lowercase : Optional[int] = 0.82 lowercase : List[str] = [ """fsdp_shard_grad_op_transformer_based_wrap""", """fsdp_full_shard_transformer_based_wrap""", ] lowercase : int = { """multi_gpu_fp16""": 3200, """fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000, """fsdp_full_shard_transformer_based_wrap_fp16""": 1900, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } lowercase : Dict = 160 lowercase : Optional[Any] = 160 lowercase : int = inspect.getfile(accelerate.test_utils ) lowercase : Any = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Dict = os.path.join(self.test_scripts_folder ,"""test_performance.py""" ) lowercase : int = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""] for config in self.performance_configs: lowercase : Optional[int] = cmd.copy() for i, strategy in enumerate(snake_case ): if strategy.lower() in config: cmd_config.append(f"--fsdp_sharding_strategy={i+1}" ) break if "fp32" in config: cmd_config.append("""--mixed_precision=no""" ) else: cmd_config.append("""--mixed_precision=fp16""" ) if "cpu_offload" in config: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(f"--fsdp_auto_wrap_policy={policy}" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f"--output_dir={self.tmpdir}", f"--performance_lower_bound={self.performance_lower_bound}", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case ,env=os.environ.copy() ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Union[str, Any] = os.path.join(self.test_scripts_folder ,"""test_checkpointing.py""" ) lowercase : int = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp""", """--mixed_precision=fp16""", """--fsdp_transformer_layer_cls_to_wrap=BertLayer""", ] for i, strategy in enumerate(snake_case ): lowercase : Any = cmd.copy() cmd_config.append(f"--fsdp_sharding_strategy={i+1}" ) if strategy != "FULL_SHARD": continue lowercase : Union[str, Any] = len(snake_case ) for state_dict_type in FSDP_STATE_DICT_TYPE: lowercase : Optional[int] = cmd_config[:state_dict_config_index] cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}" ) cmd_config.extend( [ self.test_file_path, f"--output_dir={self.tmpdir}", """--partial_train_epoch=1""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case ,env=os.environ.copy() ) lowercase : Tuple = cmd_config[:-1] lowercase : Tuple = os.path.join(self.tmpdir ,"""epoch_0""" ) cmd_config.extend( [ f"--resume_from_checkpoint={resume_from_checkpoint}", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case ,env=os.environ.copy() ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Union[str, Any] = os.path.join(self.test_scripts_folder ,"""test_peak_memory_usage.py""" ) lowercase : List[Any] = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): lowercase : Dict = cmd.copy() if "fp16" in spec: cmd_config.extend(["""--mixed_precision=fp16"""] ) else: cmd_config.extend(["""--mixed_precision=no"""] ) if "multi_gpu" in spec: continue else: cmd_config.extend(["""--use_fsdp"""] ) for i, strategy in enumerate(snake_case ): if strategy.lower() in spec: 
cmd_config.append(f"--fsdp_sharding_strategy={i+1}" ) break if "cpu_offload" in spec: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(f"--fsdp_auto_wrap_policy={policy}" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f"--output_dir={self.tmpdir}", f"--peak_memory_upper_bound={peak_mem_upper_bound}", f"--n_train={self.n_train}", f"--n_val={self.n_val}", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case ,env=os.environ.copy() )
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def lowercase_ ( _snake_case ): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def lowercase_ ( _snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Any = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""heads.cmd.mim_head.cls.predictions""" ,"""mmm_image_head""" ) SCREAMING_SNAKE_CASE__ : Dict = key.replace("""heads.cmd.mlm_head.cls.predictions""" ,"""mmm_text_head""" ) SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""heads.cmd.itm_head.cls""" ,"""itm_head""" ) SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.cmd.itm_head.pooler""" ,"""itm_head.pooler""" ) SCREAMING_SNAKE_CASE__ : int = key.replace("""heads.cmd.clip_head.logit_scale""" ,"""flava.logit_scale""" ) SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.fairseq_mlm.cls.predictions""" ,"""mlm_head""" ) SCREAMING_SNAKE_CASE__ : str = key.replace("""heads.imagenet.mim_head.cls.predictions""" ,"""mim_head""" ) SCREAMING_SNAKE_CASE__ : List[str] = key.replace("""mm_text_projection""" ,"""flava.text_to_mm_projection""" ) SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_image_projection""" ,"""flava.image_to_mm_projection""" ) SCREAMING_SNAKE_CASE__ : str = key.replace("""image_encoder.module""" ,"""flava.image_model""" ) SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""text_encoder.module""" ,"""flava.text_model""" ) SCREAMING_SNAKE_CASE__ : int = key.replace("""mm_encoder.module.encoder.cls_token""" ,"""flava.multimodal_model.cls_token""" ) SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_encoder.module""" ,"""flava.multimodal_model""" ) SCREAMING_SNAKE_CASE__ : Any = key.replace("""text_projection""" ,"""flava.text_projection""" ) SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""image_projection""" ,"""flava.image_projection""" ) SCREAMING_SNAKE_CASE__ : Tuple = value.float() for key, value in codebook_state_dict.items(): SCREAMING_SNAKE_CASE__ : Optional[Any] = value return upgrade @torch.no_grad() def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case=None ): if config_path is not None: SCREAMING_SNAKE_CASE__ : Optional[Any] = FlavaConfig.from_pretrained(_snake_case ) else: SCREAMING_SNAKE_CASE__ : List[str] = FlavaConfig() SCREAMING_SNAKE_CASE__ : Optional[int] = FlavaForPreTraining(_snake_case ).eval() SCREAMING_SNAKE_CASE__ : List[Any] = convert_dalle_checkpoint(_snake_case ,_snake_case ,save_checkpoint=_snake_case ) if os.path.exists(_snake_case ): SCREAMING_SNAKE_CASE__ : List[str] = torch.load(_snake_case ,map_location="""cpu""" ) else: SCREAMING_SNAKE_CASE__ : Tuple = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" ) SCREAMING_SNAKE_CASE__ : Dict = upgrade_state_dict(_snake_case ,_snake_case ) hf_model.load_state_dict(_snake_case ) SCREAMING_SNAKE_CASE__ : Any = hf_model.state_dict() SCREAMING_SNAKE_CASE__ : Any = count_parameters(_snake_case ) SCREAMING_SNAKE_CASE__ : str = count_parameters(_snake_case ) + count_parameters(_snake_case ) assert torch.allclose(_snake_case ,_snake_case ,atol=1E-3 ) hf_model.save_pretrained(_snake_case ) if __name__ == "__main__": UpperCAmelCase__ : List[Any] = 
argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') UpperCAmelCase__ : Optional[int] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
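A hedged usage example for the converter above: the script filename and checkpoint paths are placeholders, but the flags match the argparse definitions, and a URL also works for the checkpoint since the script falls back to torch.hub.load_state_dict_from_url when the local path is missing.

# python convert_flava_original_pytorch_to_hf.py \
#     --checkpoint_path ./flava_model.pt \
#     --codebook_path ./flava_codebook.pt \
#     --pytorch_dump_folder_path ./flava-hf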
import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging _a = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt'] _a = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('0.9.0'): raise Exception('requires fairseq >= 0.9.0') logging.set_verbosity_info() _a = logging.get_logger(__name__) _a = ' Hello world! cécé herlolip' _a = [ ('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'), ('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'), ('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'), ('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'), ] def _A ( UpperCamelCase_ : Any) -> Optional[Any]: '''simple docstring''' __lowercase = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", ] for k in ignore_keys: state_dict.pop(UpperCamelCase_, UpperCamelCase_) def _A ( UpperCamelCase_ : Optional[Any], UpperCamelCase_ : int, UpperCamelCase_ : Optional[int]) -> Dict: '''simple docstring''' __lowercase = dct.pop(UpperCamelCase_) __lowercase = val def _A ( UpperCamelCase_ : Union[str, Any]) -> List[Any]: '''simple docstring''' __lowercase = torch.load(UpperCamelCase_, map_location="cpu") __lowercase = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval() hub_interface.model.load_state_dict(sd["model"]) return hub_interface def _A ( UpperCamelCase_ : Union[str, Any]) -> Dict: '''simple docstring''' __lowercase ,__lowercase = emb.weight.shape __lowercase = nn.Linear(UpperCamelCase_, UpperCamelCase_, bias=UpperCamelCase_) __lowercase = emb.weight.data return lin_layer @torch.no_grad() def _A ( UpperCamelCase_ : Any, UpperCamelCase_ : Optional[int], UpperCamelCase_ : Optional[int]=None) -> int: '''simple docstring''' if not os.path.exists(UpperCamelCase_): __lowercase = torch.hub.load("pytorch/fairseq", UpperCamelCase_).eval() else: __lowercase = load_xsum_checkpoint(UpperCamelCase_) bart.model.upgrade_state_dict(bart.model.state_dict()) if hf_checkpoint_name is None: __lowercase = checkpoint_path.replace(".", "-") __lowercase = BartConfig.from_pretrained(UpperCamelCase_) __lowercase = bart.encode(UpperCamelCase_).unsqueeze(0) __lowercase = BartTokenizer.from_pretrained(UpperCamelCase_).encode(UpperCamelCase_, return_tensors="pt").unsqueeze(0) if not torch.eq(UpperCamelCase_, UpperCamelCase_).all(): raise ValueError( F"""converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}""") if checkpoint_path == "bart.large.mnli": __lowercase = bart.state_dict() remove_ignore_keys_(UpperCamelCase_) __lowercase = state_dict["model.decoder.embed_tokens.weight"] for src, dest in mnli_rename_keys: rename_key(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_) __lowercase = BartForSequenceClassification(UpperCamelCase_).eval() model.load_state_dict(UpperCamelCase_) __lowercase = bart.predict("mnli", UpperCamelCase_, return_logits=UpperCamelCase_) __lowercase = model(UpperCamelCase_)[0] # logits else: # no classification heads to worry about __lowercase = bart.model.state_dict() remove_ignore_keys_(UpperCamelCase_) __lowercase = 
state_dict["decoder.embed_tokens.weight"] __lowercase = bart.extract_features(UpperCamelCase_) if hf_checkpoint_name == "facebook/bart-large": __lowercase = BartModel(UpperCamelCase_).eval() model.load_state_dict(UpperCamelCase_) __lowercase = model(UpperCamelCase_).model[0] else: __lowercase = BartForConditionalGeneration(UpperCamelCase_).eval() # an existing summarization ckpt model.model.load_state_dict(UpperCamelCase_) if hasattr(UpperCamelCase_, "lm_head"): __lowercase = make_linear_from_emb(model.model.shared) __lowercase = model.model(UpperCamelCase_)[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( F"""`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}""") if (fairseq_output != new_model_outputs).any().item(): raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`") Path(UpperCamelCase_).mkdir(exist_ok=UpperCamelCase_) model.save_pretrained(UpperCamelCase_) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( 'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.' ) parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum' ) _a = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
"""simple docstring""" import numpy # List of input, output pairs _a = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) _a = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50)) _a = [2, 4, 1, 5] _a = len(train_data) _a = 0.009 def _A ( UpperCamelCase_ : str, UpperCamelCase_ : List[Any]="train") -> Optional[Any]: '''simple docstring''' return calculate_hypothesis_value(UpperCamelCase_, UpperCamelCase_) - output( UpperCamelCase_, UpperCamelCase_) def _A ( UpperCamelCase_ : List[Any]) -> Union[str, Any]: '''simple docstring''' __lowercase = 0 for i in range(len(UpperCamelCase_) - 1): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def _A ( UpperCamelCase_ : Dict, UpperCamelCase_ : Optional[int]) -> Dict: '''simple docstring''' if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def _A ( UpperCamelCase_ : Dict, UpperCamelCase_ : List[str]) -> int: '''simple docstring''' if data_set == "train": return _hypothesis_value(train_data[example_no][0]) elif data_set == "test": return _hypothesis_value(test_data[example_no][0]) return None def _A ( UpperCamelCase_ : Any, UpperCamelCase_ : Tuple=m) -> int: '''simple docstring''' __lowercase = 0 for i in range(UpperCamelCase_): if index == -1: summation_value += _error(UpperCamelCase_) else: summation_value += _error(UpperCamelCase_) * train_data[i][0][index] return summation_value def _A ( UpperCamelCase_ : str) -> str: '''simple docstring''' __lowercase = summation_of_cost_derivative(UpperCamelCase_, UpperCamelCase_) / m return cost_derivative_value def _A ( ) -> List[str]: '''simple docstring''' global parameter_vector # Tune these values to set a tolerance value for predicted output __lowercase = 0.000_002 __lowercase = 0 __lowercase = 0 while True: j += 1 __lowercase = [0, 0, 0, 0] for i in range(0, len(UpperCamelCase_)): __lowercase = get_cost_derivative(i - 1) __lowercase = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( UpperCamelCase_, UpperCamelCase_, atol=UpperCamelCase_, rtol=UpperCamelCase_, ): break __lowercase = temp_parameter_vector print(("Number of iterations:", j)) def _A ( ) -> int: '''simple docstring''' for i in range(len(UpperCamelCase_)): print(("Actual output value:", output(UpperCamelCase_, "test"))) print(("Hypothesis output:", calculate_hypothesis_value(UpperCamelCase_, "test"))) if __name__ == "__main__": run_gradient_descent() print('\nTesting gradient descent for a linear hypothesis function.\n') test_gradient_descent()
import base64


def base64_encode(string: str) -> bytes:
    """Encode a UTF-8 string as Base64 bytes."""
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded: bytes) -> str:
    """Decode Base64 bytes back into a UTF-8 string."""
    return base64.b64decode(encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
    print(decoded)
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex, tracing the UNet with a representative sample input
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_states = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_states)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
'''simple docstring''' import argparse import os import shutil from pathlib import Path import onnx import torch from packaging import version from torch.onnx import export from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline __UpperCAmelCase =version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False , ) -> List[Any]: output_path.parent.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( UpperCamelCase__ , UpperCamelCase__ , f=output_path.as_posix() , input_names=UpperCamelCase__ , output_names=UpperCamelCase__ , dynamic_axes=UpperCamelCase__ , do_constant_folding=UpperCamelCase__ , use_external_data_format=UpperCamelCase__ , enable_onnx_checker=UpperCamelCase__ , opset_version=UpperCamelCase__ , ) else: export( UpperCamelCase__ , UpperCamelCase__ , f=output_path.as_posix() , input_names=UpperCamelCase__ , output_names=UpperCamelCase__ , dynamic_axes=UpperCamelCase__ , do_constant_folding=UpperCamelCase__ , opset_version=UpperCamelCase__ , ) @torch.no_grad() def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> Tuple: __lowerCamelCase = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): __lowerCamelCase = '''cuda''' elif fpaa and not torch.cuda.is_available(): raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' ) else: __lowerCamelCase = '''cpu''' __lowerCamelCase = StableDiffusionPipeline.from_pretrained(UpperCamelCase__ , torch_dtype=UpperCamelCase__ ).to(UpperCamelCase__ ) __lowerCamelCase = Path(UpperCamelCase__ ) # TEXT ENCODER __lowerCamelCase = pipeline.text_encoder.config.max_position_embeddings __lowerCamelCase = pipeline.text_encoder.config.hidden_size __lowerCamelCase = pipeline.tokenizer( '''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors='''pt''' , ) onnx_export( pipeline.text_encoder , model_args=(text_input.input_ids.to(device=UpperCamelCase__ , dtype=torch.intaa )) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={ '''input_ids''': {0: '''batch''', 1: '''sequence'''}, } , opset=UpperCamelCase__ , ) del pipeline.text_encoder # UNET __lowerCamelCase = pipeline.unet.config.in_channels __lowerCamelCase = pipeline.unet.config.sample_size __lowerCamelCase = output_path / '''unet''' / '''model.onnx''' onnx_export( pipeline.unet , model_args=( torch.randn(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ), torch.randn(2 ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ), torch.randn(2 , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ), False, ) , output_path=UpperCamelCase__ , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={ '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: 
'''width'''}, '''timestep''': {0: '''batch'''}, '''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''}, } , opset=UpperCamelCase__ , use_external_data_format=UpperCamelCase__ , ) __lowerCamelCase = str(unet_path.absolute().as_posix() ) __lowerCamelCase = os.path.dirname(UpperCamelCase__ ) __lowerCamelCase = onnx.load(UpperCamelCase__ ) # clean up existing tensor files shutil.rmtree(UpperCamelCase__ ) os.mkdir(UpperCamelCase__ ) # collate external tensor files into one onnx.save_model( UpperCamelCase__ , UpperCamelCase__ , save_as_external_data=UpperCamelCase__ , all_tensors_to_one_file=UpperCamelCase__ , location='''weights.pb''' , convert_attribute=UpperCamelCase__ , ) del pipeline.unet # VAE ENCODER __lowerCamelCase = pipeline.vae __lowerCamelCase = vae_encoder.config.in_channels __lowerCamelCase = vae_encoder.config.sample_size # need to get the raw tensor output (sample) from the encoder __lowerCamelCase = lambda UpperCamelCase__ , UpperCamelCase__ : vae_encoder.encode(UpperCamelCase__ , UpperCamelCase__ )[0].sample() onnx_export( UpperCamelCase__ , model_args=( torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ), False, ) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={ '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, } , opset=UpperCamelCase__ , ) # VAE DECODER __lowerCamelCase = pipeline.vae __lowerCamelCase = vae_decoder.config.latent_channels __lowerCamelCase = vae_decoder.config.out_channels # forward only through the decoder part __lowerCamelCase = vae_encoder.decode onnx_export( UpperCamelCase__ , model_args=( torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ), False, ) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={ '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, } , opset=UpperCamelCase__ , ) del pipeline.vae # SAFETY CHECKER if pipeline.safety_checker is not None: __lowerCamelCase = pipeline.safety_checker __lowerCamelCase = safety_checker.config.vision_config.num_channels __lowerCamelCase = safety_checker.config.vision_config.image_size __lowerCamelCase = safety_checker.forward_onnx onnx_export( pipeline.safety_checker , model_args=( torch.randn( 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ), torch.randn(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ), ) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={ '''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, '''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''}, } , opset=UpperCamelCase__ , ) del pipeline.safety_checker __lowerCamelCase = OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''' ) __lowerCamelCase = pipeline.feature_extractor else: __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = OnnxStableDiffusionPipeline( 
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''' ) , scheduler=pipeline.scheduler , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , requires_safety_checker=safety_checker is not None , ) onnx_pipeline.save_pretrained(UpperCamelCase__ ) print('''ONNX pipeline saved to''' , UpperCamelCase__ ) del pipeline del onnx_pipeline __lowerCamelCase = OnnxStableDiffusionPipeline.from_pretrained(UpperCamelCase__ , provider='''CPUExecutionProvider''' ) print('''ONNX pipeline is loadable''' ) if __name__ == "__main__": __UpperCAmelCase =argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=1_4, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") __UpperCAmelCase =parser.parse_args() convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
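A hedged invocation example for the exporter above; the script filename and output path are placeholders, while the flags mirror the argparse definitions (--fp16 can be added to export half-precision weights, which the script only allows on CUDA).

# python convert_stable_diffusion_checkpoint_to_onnx.py \
#     --model_path runwayml/stable-diffusion-v1-5 \
#     --output_path ./sd-v1-5-onnx \
#     --opset 14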
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __UpperCAmelCase =get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class a__ ( UpperCAmelCase__ , unittest.TestCase ): lowerCamelCase : Optional[Any] =DebertaVaTokenizer lowerCamelCase : Optional[int] =DebertaVaTokenizerFast lowerCamelCase : Optional[Any] =True lowerCamelCase : Tuple =True def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __lowerCamelCase = DebertaVaTokenizer(a , unk_token='''<unk>''' ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : Dict ): """simple docstring""" __lowerCamelCase = '''this is a test''' __lowerCamelCase = '''this is a test''' return input_text, output_text def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" __lowerCamelCase = '''<pad>''' __lowerCamelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" __lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''[PAD]''' ) self.assertEqual(len(a ) , 3_00_01 ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" __lowerCamelCase = ''' \tHeLLo!how \n Are yoU? 
''' __lowerCamelCase = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?'''] # fmt: on __lowerCamelCase = DebertaVaTokenizer(a , do_lower_case=a ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) __lowerCamelCase = DebertaVaTokenizerFast(a , do_lower_case=a ) __lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" pass @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" __lowerCamelCase = '''I was born in 92000, and this is falsé.''' __lowerCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on __lowerCamelCase = DebertaVaTokenizer(a , split_by_punct=a ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) __lowerCamelCase = DebertaVaTokenizerFast(a , split_by_punct=a ) __lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" __lowerCamelCase = '''I was born in 92000, and this is falsé.''' __lowerCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on __lowerCamelCase = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) __lowerCamelCase = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a ) __lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" __lowerCamelCase = '''I was born in 92000, and this is falsé.''' __lowerCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on __lowerCamelCase = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) __lowerCamelCase = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a ) __lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" __lowerCamelCase = '''I was born in 92000, and this is falsé.''' __lowerCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on __lowerCamelCase = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a ) 
__lowerCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) __lowerCamelCase = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a ) __lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" __lowerCamelCase = ''' \tHeLLo!how \n Are yoU? ''' __lowerCamelCase = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?'''] # fmt: on __lowerCamelCase = DebertaVaTokenizer(a , do_lower_case=a , split_by_punct=a ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) __lowerCamelCase = DebertaVaTokenizerFast(a , do_lower_case=a , split_by_punct=a ) __lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = '''I was born in 92000, and this is falsé.''' __lowerCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(a , add_special_tokens=a ) ) __lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a , add_special_tokens=a ) ) self.assertListEqual(a , a ) __lowerCamelCase = tokenizer.encode(a , add_special_tokens=a ) __lowerCamelCase = rust_tokenizer.encode(a , add_special_tokens=a ) self.assertListEqual(a , a ) __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = tokenizer.encode(a ) __lowerCamelCase = rust_tokenizer.encode(a ) self.assertListEqual(a , a ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" __lowerCamelCase = '''This is a test''' __lowerCamelCase = [13, 1, 43_98, 25, 21, 12_89] __lowerCamelCase = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test'''] __lowerCamelCase = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test'''] __lowerCamelCase = DebertaVaTokenizer(a , keep_accents=a ) __lowerCamelCase = DebertaVaTokenizerFast(a , keep_accents=a ) __lowerCamelCase = tokenizer.encode(a , add_special_tokens=a ) self.assertListEqual(a , a ) __lowerCamelCase = tokenizer.tokenize(a ) self.assertListEqual(a , a ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(a ) self.assertListEqual(a , a ) __lowerCamelCase = rust_tokenizer.encode(a , add_special_tokens=a ) self.assertListEqual(a , a ) __lowerCamelCase = rust_tokenizer.tokenize(a ) self.assertListEqual(a , a ) __lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(a ) self.assertListEqual(a , a ) # fmt: off __lowerCamelCase = '''I was born in 92000, and this is falsé.''' __lowerCamelCase = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] __lowerCamelCase = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ] __lowerCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on __lowerCamelCase = tokenizer.encode(a , add_special_tokens=a ) self.assertListEqual(a , a ) __lowerCamelCase = tokenizer.tokenize(a ) self.assertListEqual(a , a ) __lowerCamelCase = 
tokenizer.convert_ids_to_tokens(a ) self.assertListEqual(a , a ) __lowerCamelCase = rust_tokenizer.encode(a , add_special_tokens=a ) self.assertListEqual(a , a ) __lowerCamelCase = rust_tokenizer.tokenize(a ) self.assertListEqual(a , a ) __lowerCamelCase = rust_tokenizer.convert_ids_to_tokens(a ) self.assertListEqual(a , a ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" __lowerCamelCase = DebertaVaTokenizer(a ) __lowerCamelCase = tokenizer.encode('''sequence builders''' ) __lowerCamelCase = tokenizer.encode('''multi-sequence build''' ) __lowerCamelCase = tokenizer.build_inputs_with_special_tokens(a ) __lowerCamelCase = tokenizer.build_inputs_with_special_tokens(a , a ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , a ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , a , ) @slow def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" __lowerCamelCase = {'''input_ids''': [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
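As an illustrative restatement of the slow/fast parity pattern these tests repeat (assuming the sentencepiece fixture path loaded at the top of this module, written here as SAMPLE_VOCAB):

from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast

slow_tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
fast_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)

text = "I was born in 92000, and this is falsé."
# Both tokenizers are expected to produce the same ids for this input.
assert slow_tokenizer.encode(text, add_special_tokens=False) == fast_tokenizer.encode(
    text, add_special_tokens=False
)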
"""Generate all k-combinations of the numbers 1..n using backtracking."""
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    # A complete combination has been built; store a copy.
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for combination in total_list:
        print(*combination)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
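A quick sanity check against the standard library (an illustrative addition, not part of the original script): the backtracking generator should emit exactly the k-combinations of 1..n, in the same order as itertools.combinations.

from itertools import combinations

assert generate_all_combinations(4, 2) == [
    list(c) for c in combinations(range(1, 5), 2)
]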
'''simple docstring''' import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _lowerCAmelCase : def __init__(self , lowercase , lowercase=13 , lowercase=3 , lowercase=True , lowercase=True , lowercase=0.1 , lowercase=0.1 , lowercase=224 , lowercase=1000 , lowercase=[3, 3, 6, 4] , lowercase=[48, 56, 112, 220] , ): A_ : Dict = parent A_ : List[Any] = batch_size A_ : Dict = num_channels A_ : Optional[Any] = is_training A_ : List[str] = use_labels A_ : List[Any] = hidden_dropout_prob A_ : Optional[int] = attention_probs_dropout_prob A_ : Tuple = num_labels A_ : List[str] = image_size A_ : str = layer_depths A_ : Optional[int] = embed_dims def _a (self ): A_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : int = None if self.use_labels: A_ : Tuple = ids_tensor([self.batch_size] , self.num_labels ) A_ : int = self.get_config() return config, pixel_values, labels def _a (self ): return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowercase , layer_scale_init_value=1E-5 , ) def _a (self , lowercase , lowercase , lowercase ): A_ : List[Any] = SwiftFormerModel(config=lowercase ) model.to(lowercase ) model.eval() A_ : Union[str, Any] = model(lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def _a (self , lowercase , lowercase , lowercase ): A_ : Any = self.num_labels A_ : Any = SwiftFormerForImageClassification(lowercase ) model.to(lowercase ) model.eval() A_ : Optional[int] = model(lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) A_ : int = SwiftFormerForImageClassification(lowercase ) model.to(lowercase ) model.eval() A_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Dict = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a (self ): ((A_), (A_), (A_)) : int = self.prepare_config_and_inputs() A_ : Any = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () __SCREAMING_SNAKE_CASE : Optional[Any] = ( {'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification} if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE : int = False __SCREAMING_SNAKE_CASE : 
List[Any] = False __SCREAMING_SNAKE_CASE : List[str] = False __SCREAMING_SNAKE_CASE : Union[str, Any] = False __SCREAMING_SNAKE_CASE : Union[str, Any] = False def _a (self ): A_ : Optional[int] = SwiftFormerModelTester(self ) A_ : Any = ConfigTester( self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def _a (self ): self.config_tester.run_common_tests() @unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" ) def _a (self ): pass def _a (self ): A_, A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Optional[Any] = model_class(lowercase ) A_ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) ) def _a (self ): A_, A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : str = model_class(lowercase ) A_ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : int = [*signature.parameters.keys()] A_ : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowercase ) def _a (self ): A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase ) def _a (self ): A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase ) @slow def _a (self ): for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Optional[Any] = SwiftFormerModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) @unittest.skip(reason="""SwiftFormer does not output attentions""" ) def _a (self ): pass def _a (self ): def check_hidden_states_output(lowercase , lowercase , lowercase ): A_ : str = model_class(lowercase ) model.to(lowercase ) model.eval() with torch.no_grad(): A_ : Optional[int] = model(**self._prepare_for_class(lowercase , lowercase ) ) A_ : Any = outputs.hidden_states A_ : Any = 8 self.assertEqual(len(lowercase ) , lowercase ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(lowercase ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) A_, A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : str = True check_hidden_states_output(lowercase , lowercase , lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ : str = True check_hidden_states_output(lowercase , lowercase , lowercase ) def _a (self ): def _config_zero_init(lowercase ): A_ : Optional[Any] = copy.deepcopy(lowercase ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(lowercase , lowercase , 1E-10 ) if isinstance(getattr(lowercase , lowercase , lowercase ) , lowercase ): A_ : Any = _config_zero_init(getattr(lowercase , lowercase ) ) setattr(lowercase , lowercase , lowercase ) return configs_no_init A_, A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A_ : Any = 
_config_zero_init(lowercase ) for model_class in self.all_model_classes: A_ : List[str] = model_class(config=lowercase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def _a (self ): pass def a ( ): '''simple docstring''' A_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): @cached_property def _a (self ): return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None @slow def _a (self ): A_ : Any = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(lowercase ) A_ : Dict = self.default_image_processor A_ : Dict = prepare_img() A_ : int = image_processor(images=lowercase , return_tensors="""pt""" ).to(lowercase ) # forward pass with torch.no_grad(): A_ : int = model(**lowercase ) # verify the logits A_ : int = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowercase ) A_ : List[str] = torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
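A standalone sketch of the slow integration check above, using the same public checkpoint and test fixture image it names:

import torch
from PIL import Image
from transformers import SwiftFormerForImageClassification, ViTImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.shape)  # expected: torch.Size([1, 1000])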
206
1
"""simple docstring""" import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=os.environ.get('LOGLEVEL', 'INFO').upper(), stream=sys.stdout, ) __SCREAMING_SNAKE_CASE : str = logging.getLogger(__name__) __SCREAMING_SNAKE_CASE : Optional[Any] = {'facebook/bart-base': BartForConditionalGeneration} __SCREAMING_SNAKE_CASE : int = {'facebook/bart-base': BartTokenizer} def _a ( ) -> Optional[int]: snake_case_ = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" ) parser.add_argument( """--validation_file""" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help="""A csv or a json file containing the validation data.""" ) parser.add_argument( """--max_length""" , type=_SCREAMING_SNAKE_CASE , default=5 , help="""The maximum total input sequence length after tokenization.""" , ) parser.add_argument( """--num_beams""" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help=( """Number of beams to use for evaluation. This argument will be """ """passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.""" ) , ) parser.add_argument( """--model_name_or_path""" , type=_SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=_SCREAMING_SNAKE_CASE , ) parser.add_argument( """--config_name""" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help="""Pretrained config name or path if not the same as model_name""" , ) parser.add_argument( """--device""" , type=_SCREAMING_SNAKE_CASE , default="""cpu""" , help="""Device where the model will be run""" , ) parser.add_argument("""--output_file_path""" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help="""Where to store the final ONNX file.""" ) snake_case_ = parser.parse_args() return args def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="cpu" ) -> Union[str, Any]: snake_case_ = model_dict[model_name].from_pretrained(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) snake_case_ = tokenizer_dict[model_name].from_pretrained(_SCREAMING_SNAKE_CASE ) if model_name in ["facebook/bart-base"]: snake_case_ = 0 snake_case_ = None snake_case_ = 0 return huggingface_model, tokenizer def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int: model.eval() snake_case_ = None snake_case_ = torch.jit.script(BARTBeamSearchGenerator(_SCREAMING_SNAKE_CASE ) ) with torch.no_grad(): snake_case_ = """My friends are cool but they eat too many carbs.""" snake_case_ = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors="""pt""" ).to(model.device ) snake_case_ = model.generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , early_stopping=_SCREAMING_SNAKE_CASE , decoder_start_token_id=model.config.decoder_start_token_id , ) torch.onnx.export( _SCREAMING_SNAKE_CASE , ( inputs["""input_ids"""], inputs["""attention_mask"""], num_beams, max_length, model.config.decoder_start_token_id, ) , _SCREAMING_SNAKE_CASE , opset_version=14 
, input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={ """input_ids""": {0: """batch""", 1: """seq"""}, """output_ids""": {0: """batch""", 1: """seq_out"""}, } , example_outputs=_SCREAMING_SNAKE_CASE , ) logger.info("""Model exported to {}""".format(_SCREAMING_SNAKE_CASE ) ) snake_case_ = remove_dup_initializers(os.path.abspath(_SCREAMING_SNAKE_CASE ) ) logger.info("""Deduplicated and optimized model written to {}""".format(_SCREAMING_SNAKE_CASE ) ) snake_case_ = onnxruntime.InferenceSession(_SCREAMING_SNAKE_CASE ) snake_case_ = ort_sess.run( _SCREAMING_SNAKE_CASE , { """input_ids""": inputs["""input_ids"""].cpu().numpy(), """attention_mask""": inputs["""attention_mask"""].cpu().numpy(), """num_beams""": np.array(_SCREAMING_SNAKE_CASE ), """max_length""": np.array(_SCREAMING_SNAKE_CASE ), """decoder_start_token_id""": np.array(model.config.decoder_start_token_id ), } , ) np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 ) logger.info("""Model outputs from torch and ONNX Runtime are similar.""" ) logger.info("""Success.""" ) def _a ( ) -> Optional[int]: snake_case_ = parse_args() snake_case_ = 5 snake_case_ = 4 # Make one log on every process with the configuration for debugging. logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , ) logger.setLevel(logging.INFO ) transformers.utils.logging.set_verbosity_error() snake_case_ = torch.device(args.device ) snake_case_ , snake_case_ = load_model_tokenizer(args.model_name_or_path , _SCREAMING_SNAKE_CASE ) if model.config.decoder_start_token_id is None: raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" ) model.to(_SCREAMING_SNAKE_CASE ) if args.max_length: snake_case_ = args.max_length if args.num_beams: snake_case_ = args.num_beams if args.output_file_path: snake_case_ = args.output_file_path else: snake_case_ = """BART.onnx""" logger.info("""Exporting model to ONNX""" ) export_and_validate_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
358
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> int: snake_case_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" snake_case_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> List[Any]: for i in range(config.num_hidden_layers ): if base_model: snake_case_ = """""" else: snake_case_ = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case_ = in_proj_weight[ : config.hidden_size, : ] snake_case_ = in_proj_bias[: config.hidden_size] snake_case_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] snake_case_ = in_proj_weight[ -config.hidden_size :, : ] snake_case_ = in_proj_bias[-config.hidden_size :] def _a ( _SCREAMING_SNAKE_CASE ) 
-> Union[str, Any]: snake_case_ = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int: snake_case_ = dct.pop(_SCREAMING_SNAKE_CASE ) snake_case_ = val def _a ( ) -> Dict: snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ) -> Any: snake_case_ = ViTConfig() # patch_size if model_name[-1] == "8": snake_case_ = 8 # set labels if required if not base_model: snake_case_ = 1_000 snake_case_ = """huggingface/label-files""" snake_case_ = """imagenet-1k-id2label.json""" snake_case_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) snake_case_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: snake_case_ = 384 snake_case_ = 1_536 snake_case_ = 12 snake_case_ = 6 # load original model from torch hub snake_case_ = torch.hub.load("""facebookresearch/dino:main""" , _SCREAMING_SNAKE_CASE ) original_model.eval() # load state_dict of original model, remove and rename some keys snake_case_ = original_model.state_dict() if base_model: remove_classification_head_(_SCREAMING_SNAKE_CASE ) snake_case_ = create_rename_keys(_SCREAMING_SNAKE_CASE , base_model=_SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # load HuggingFace model if base_model: snake_case_ = ViTModel(_SCREAMING_SNAKE_CASE , add_pooling_layer=_SCREAMING_SNAKE_CASE ).eval() else: snake_case_ = ViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by ViTImageProcessor snake_case_ = ViTImageProcessor() snake_case_ = image_processor(images=prepare_img() , return_tensors="""pt""" ) snake_case_ = encoding["""pixel_values"""] snake_case_ = model(_SCREAMING_SNAKE_CASE ) if base_model: snake_case_ = original_model(_SCREAMING_SNAKE_CASE ) assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.last_hidden_state[:, 0, :] , atol=1E-1 ) else: snake_case_ = original_model(_SCREAMING_SNAKE_CASE ) assert logits.shape == outputs.logits.shape assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='dino_vitb16', type=str, help='Name of the model trained with DINO you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' 
) parser.add_argument( '--base_model', action='store_true', help='Whether to only convert the base model (no projection head weights).', ) parser.set_defaults(base_model=True) __SCREAMING_SNAKE_CASE : int = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
233
0
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow

if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpta import TFGPTaTokenizer

__snake_case : Union[str, Any] = ["""gpt2"""]
__snake_case : Tuple = """gpt2"""

if is_tf_available():

    class A__(tf.Module ):
        """simple docstring"""

        def __init__( self , _lowercase ) -> Union[str, Any]:
            super().__init__()
            a_ : Tuple = tokenizer
            a_ : str = AutoConfig.from_pretrained(_lowercase )
            a_ : Dict = TFGPTaLMHeadModel.from_config(_lowercase )

        @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text""" ),) )
        def UpperCamelCase__ ( self , _lowercase ) -> Tuple:
            a_ : List[str] = self.tokenizer(_lowercase )
            a_ : List[Any] = tokenized["""input_ids"""].to_tensor()
            a_ : Dict = tf.cast(input_ids_dense > 0 , tf.intaa )
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            a_ : Dict = self.model(input_ids=_lowercase , attention_mask=_lowercase )["""logits"""]
            return outputs


@require_tf
@require_keras_nlp
class A__(unittest.TestCase ):
    """simple docstring"""

    def UpperCamelCase__ ( self ) -> List[str]:
        super().setUp()
        a_ : Tuple = [GPTaTokenizer.from_pretrained(_lowercase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        a_ : int = [TFGPTaTokenizer.from_pretrained(_lowercase ) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        a_ : Union[str, Any] = [
            """This is a straightforward English test sentence.""",
            """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
            """Now we're going to add some Chinese: 一 二 三 一二三""",
            """And some much more rare Chinese: 齉 堃 齉堃""",
            """Je vais aussi écrire en français pour tester les accents""",
            """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
        ]
        a_ : Any = list(zip(self.test_sentences , self.test_sentences[::-1] ) )

    def UpperCamelCase__ ( self ) -> Union[str, Any]:
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
            for test_inputs in self.test_sentences:
                a_ : Dict = tokenizer([test_inputs] , return_tensors="""tf""" )
                a_ : Optional[int] = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    a_ : Dict = python_outputs[key].numpy()
                    a_ : Tuple = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(_lowercase , tf.intaa ) == tf_outputs_values ) )

    @slow
    def UpperCamelCase__ ( self ) -> str:
        for tf_tokenizer in self.tf_tokenizers:
            a_ : List[Any] = tf.function(_lowercase )
            for test_inputs in self.test_sentences:
                a_ : Tuple = tf.constant(_lowercase )
                a_ : int = compiled_tokenizer(_lowercase )
                a_ : List[str] = tf_tokenizer(_lowercase )
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )

    @slow
    def UpperCamelCase__ ( self ) -> int:
        for tf_tokenizer in self.tf_tokenizers:
            a_ : Dict = ModelToSave(tokenizer=_lowercase )
            a_ : int = tf.convert_to_tensor([self.test_sentences[0]] )
            a_ : Dict = model.serving(_lowercase )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                a_ : Optional[Any] = Path(_lowercase ) / """saved.model"""
                tf.saved_model.save(_lowercase , _lowercase , signatures={"""serving_default""": model.serving} )
                a_ : Union[str, Any] = tf.saved_model.load(_lowercase )
                a_ : Union[str, Any] = loaded_model.signatures["""serving_default"""](_lowercase )["""output_0"""]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output ) )

    @slow
    def UpperCamelCase__ ( self ) -> Dict:
        for tf_tokenizer in self.tf_tokenizers:
            a_ : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
            a_ : Dict = tf_tokenizer(_lowercase )  # Build model with some sample inputs
            a_ : Any = tf_tokenizer.get_config()
            a_ : List[Any] = TFGPTaTokenizer.from_config(_lowercase )
            a_ : Any = model_from_config(_lowercase )
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )

    @slow
    def UpperCamelCase__ ( self ) -> str:
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            a_ : List[Any] = 123_123
            for max_length in [3, 5, 1_024]:
                a_ : Dict = tf.convert_to_tensor([self.test_sentences[0]] )
                a_ : List[str] = tf_tokenizer(_lowercase , max_length=_lowercase )
                a_ : Optional[Any] = out["""input_ids"""].numpy().shape[1]
                assert out_length == max_length
248
import json
import os
import unittest

from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors


@require_tokenizers
class A__(a_, unittest.TestCase ):
    """simple docstring"""

    _A : Optional[Any] = MvpTokenizer
    _A : List[Any] = MvpTokenizerFast
    _A : Dict = True
    _A : Optional[Any] = filter_roberta_detectors

    def UpperCamelCase__ ( self ) -> Union[str, Any]:
        super().setUp()
        a_ : Dict = [
            """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""",
            """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""",
            """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""",
        ]
        a_ : int = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
        a_ : List[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        a_ : Optional[int] = {"""unk_token""": """<unk>"""}
        a_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        a_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(_lowercase ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(_lowercase ) )

    def UpperCamelCase__ ( self , **_lowercase ) -> Optional[Any]:
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )

    def UpperCamelCase__ ( self , **_lowercase ) -> Union[str, Any]:
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )

    def UpperCamelCase__ ( self , _lowercase ) -> int:
        return "lower newer", "lower newer"

    @cached_property
    def UpperCamelCase__ ( self ) -> List[Any]:
        return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""" )

    @cached_property
    def UpperCamelCase__ ( self ) -> Union[str, Any]:
        return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""" )

    @require_torch
    def UpperCamelCase__ ( self ) -> List[str]:
        a_ : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        a_ : List[str] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            a_ : Optional[Any] = tokenizer(_lowercase , max_length=len(_lowercase ) , padding=_lowercase , return_tensors="""pt""" )
            self.assertIsInstance(_lowercase , _lowercase )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            a_ : Dict = batch.input_ids.tolist()[0]
            self.assertListEqual(_lowercase , _lowercase )
            # Test that special tokens are reset

    @require_torch
    def UpperCamelCase__ ( self ) -> Union[str, Any]:
        a_ : List[str] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            a_ : List[str] = tokenizer(_lowercase , padding=_lowercase , return_tensors="""pt""" )
            # check if input_ids are returned and no labels
            self.assertIn("""input_ids""" , _lowercase )
            self.assertIn("""attention_mask""" , _lowercase )
            self.assertNotIn("""labels""" , _lowercase )
            self.assertNotIn("""decoder_attention_mask""" , _lowercase )

    @require_torch
    def UpperCamelCase__ ( self ) -> Union[str, Any]:
        a_ : List[Any] = [
            """Summary of the text.""",
            """Another summary.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            a_ : int = tokenizer(text_target=_lowercase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
            self.assertEqual(32 , targets["""input_ids"""].shape[1] )

    @require_torch
    def UpperCamelCase__ ( self ) -> Any:
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            a_ : int = tokenizer(
                ["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=_lowercase , truncation=_lowercase , return_tensors="""pt""" )
            self.assertIsInstance(_lowercase , _lowercase )
            self.assertEqual(batch.input_ids.shape , (2, 1_024) )

    @require_torch
    def UpperCamelCase__ ( self ) -> List[str]:
        a_ : Tuple = ["""A long paragraph for summarization."""]
        a_ : Optional[Any] = [
            """Summary of the text.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            a_ : List[Any] = tokenizer(_lowercase , text_target=_lowercase , return_tensors="""pt""" )
            a_ : Union[str, Any] = inputs["""input_ids"""]
            a_ : Dict = inputs["""labels"""]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )

    def UpperCamelCase__ ( self ) -> int:
        pass

    def UpperCamelCase__ ( self ) -> Union[str, Any]:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                a_ : List[str] = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                a_ : List[str] = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                a_ : Optional[int] = """A, <mask> AllenNLP sentence."""
                a_ : Union[str, Any] = tokenizer_r.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
                a_ : Tuple = tokenizer_p.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,
                    sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
                a_ : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
                a_ : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    _lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
                self.assertSequenceEqual(
                    _lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
248
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging

a__ : Optional[int] = logging.get_logger(__name__)

a__ : Optional[int] = {
    '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
    '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}


class a_ ( a__ ):
    """simple docstring"""

    __SCREAMING_SNAKE_CASE : Union[str, Any] = 'rwkv'
    __SCREAMING_SNAKE_CASE : List[str] = {'max_position_embeddings': 'context_length'}

    def __init__( self , _lowerCamelCase=5_0277 , _lowerCamelCase=1024 , _lowerCamelCase=4096 , _lowerCamelCase=32 , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=1e-5 , _lowerCamelCase=0 , _lowerCamelCase=0 , _lowerCamelCase=6 , _lowerCamelCase=False , _lowerCamelCase=True , **_lowerCamelCase , ) ->Dict:
        SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
        SCREAMING_SNAKE_CASE : List[Any] = context_length
        SCREAMING_SNAKE_CASE : List[str] = hidden_size
        SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
        SCREAMING_SNAKE_CASE : Tuple = attention_hidden_size if attention_hidden_size is not None else hidden_size
        SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size if intermediate_size is not None else 4 * hidden_size
        SCREAMING_SNAKE_CASE : int = layer_norm_epsilon
        SCREAMING_SNAKE_CASE : Dict = rescale_every
        SCREAMING_SNAKE_CASE : Optional[int] = use_cache
        SCREAMING_SNAKE_CASE : str = bos_token_id
        SCREAMING_SNAKE_CASE : Any = eos_token_id
        super().__init__(
            tie_word_embeddings=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
19
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

a__ : str = logging.get_logger(__name__)

a__ : Optional[Any] = {'''vocab_file''': '''vocab.json'''}

a__ : str = {
    '''vocab_file''': {
        '''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
    }
}

a__ : Tuple = {'''mgp-str''': 27}


class a_ ( a__ ):
    """simple docstring"""

    __SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES
    __SCREAMING_SNAKE_CASE : Dict = PRETRAINED_VOCAB_FILES_MAP
    __SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self , _lowerCamelCase , _lowerCamelCase="[GO]" , _lowerCamelCase="[GO]" , _lowerCamelCase="[s]" , _lowerCamelCase="[GO]" , **_lowerCamelCase ) ->Dict:
        super().__init__(
            unk_token=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , pad_token=_lowerCamelCase , **_lowerCamelCase , )
        with open(_lowerCamelCase , encoding='''utf-8''' ) as vocab_handle:
            SCREAMING_SNAKE_CASE : List[Any] = json.load(_lowerCamelCase )
        SCREAMING_SNAKE_CASE : int = {v: k for k, v in self.vocab.items()}

    @property
    def __lowerCAmelCase ( self ) ->List[Any]:
        return len(self.vocab )

    def __lowerCAmelCase ( self ) ->Union[str, Any]:
        return dict(self.vocab , **self.added_tokens_encoder )

    def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
        SCREAMING_SNAKE_CASE : Union[str, Any] = []
        for s in text:
            char_tokens.extend(_lowerCamelCase )
        return char_tokens

    def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict:
        return self.vocab.get(_lowerCamelCase , self.vocab.get(self.unk_token ) )

    def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
        return self.decoder.get(_lowerCamelCase )

    def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->Tuple[str]:
        if not os.path.isdir(_lowerCamelCase ):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(_lowerCamelCase ) )
            return
        SCREAMING_SNAKE_CASE : str = os.path.join(
            _lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=_lowerCamelCase , ensure_ascii=_lowerCamelCase ) + '''\n''' )
        return (vocab_file,)
19
1
"""simple docstring""" # Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[int] , _lowercase : Tuple , _lowercase : Any ) ->Optional[Any]: '''simple docstring''' a : Any = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] a : Union[str, Any] = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } a : List[Any] = F"""{src_lang}-{tgt_lang}""" a : Optional[int] = F""" --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). The abbreviation FSMT stands for FairSeqMachineTranslation All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = \"{texts[src_lang]}\" input_ids = tokenizer.encode(input, return_tensors=\"pt\") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support: - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``). 
- re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR's WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) """ os.makedirs(_lowercase , exist_ok=_lowercase ) a : Tuple = os.path.join(_lowercase , "README.md" ) print(F"""Generating {path}""" ) with open(_lowercase , "w" , encoding="utf-8" ) as f: f.write(_lowercase ) # make sure we are under the root of the project a : int = Path(__file__).resolve().parent.parent.parent a : Union[str, Any] = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: a , a , a : List[str] = model_name.split('''-''') a : List[str] = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
105
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD

torch.set_grad_enabled(False)


def __UpperCamelCase ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str=False ):
    __a : Any = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ('''module.cls_token''', '''vit.embeddings.cls_token'''),
            ('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
            ('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
            ('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
        ] )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ('''module.norm.weight''', '''layernorm.weight'''),
                ('''module.norm.bias''', '''layernorm.bias'''),
            ] )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        __a : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ('''norm.weight''', '''vit.layernorm.weight'''),
                ('''norm.bias''', '''vit.layernorm.bias'''),
                ('''head.weight''', '''classifier.weight'''),
                ('''head.bias''', '''classifier.bias'''),
            ] )

    return rename_keys


def __UpperCamelCase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any]=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            __a : Dict = ''''''
        else:
            __a : str = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        __a : List[Any] = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight" )
        __a : Tuple = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        __a : Dict = in_proj_weight[
            : config.hidden_size, :
        ]
        __a : str = in_proj_bias[: config.hidden_size]
        __a : int = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        __a : Dict = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        __a : str = in_proj_weight[
            -config.hidden_size :, :
        ]
        __a : Optional[Any] = in_proj_bias[-config.hidden_size :]


def __UpperCamelCase ( lowerCAmelCase__ : Optional[int] ):
    __a : Dict = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )


def __UpperCamelCase ( lowerCAmelCase__ : Optional[Any] ):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    __a : str = [
        '''module.fc.fc1.weight''',
        '''module.fc.fc1.bias''',
        '''module.fc.bn1.weight''',
        '''module.fc.bn1.bias''',
        '''module.fc.bn1.running_mean''',
        '''module.fc.bn1.running_var''',
        '''module.fc.bn1.num_batches_tracked''',
        '''module.fc.fc2.weight''',
        '''module.fc.fc2.bias''',
        '''module.fc.bn2.weight''',
        '''module.fc.bn2.bias''',
        '''module.fc.bn2.running_mean''',
        '''module.fc.bn2.running_var''',
        '''module.fc.bn2.num_batches_tracked''',
        '''module.fc.fc3.weight''',
        '''module.fc.fc3.bias''',
    ]
    for k in ignore_keys:
        state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )


def __UpperCamelCase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[Any] ):
    __a : Optional[Any] = dct.pop(lowerCAmelCase__ )
    __a : Optional[Any] = val


def __UpperCamelCase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str ):
    __a : str = ViTMSNConfig()
    __a : List[Any] = 1_0_0_0
    __a : Union[str, Any] = '''datasets/huggingface/label-files'''
    __a : Optional[int] = '''imagenet-1k-id2label.json'''
    __a : Optional[int] = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ ) , '''r''' ) )
    __a : Dict = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
    __a : Tuple = idalabel
    __a : int = {v: k for k, v in idalabel.items()}
    if "s16" in checkpoint_url:
        __a : Union[str, Any] = 3_8_4
        __a : Union[str, Any] = 1_5_3_6
        __a : Union[str, Any] = 6
    elif "l16" in checkpoint_url:
        __a : str = 1_0_2_4
        __a : Union[str, Any] = 4_0_9_6
        __a : Optional[int] = 2_4
        __a : int = 1_6
        __a : List[Any] = 0.1
    elif "b4" in checkpoint_url:
        __a : int = 4
    elif "l7" in checkpoint_url:
        __a : int = 7
        __a : List[Any] = 1_0_2_4
        __a : Union[str, Any] = 4_0_9_6
        __a : List[str] = 2_4
        __a : int = 1_6
        __a : Tuple = 0.1
    __a : Dict = ViTMSNModel(lowerCAmelCase__ )
    __a : Union[str, Any] = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location='''cpu''' )['''target_encoder''']
    __a : Optional[int] = ViTImageProcessor(size=config.image_size )
    remove_projection_head(lowerCAmelCase__ )
    __a : str = create_rename_keys(lowerCAmelCase__ , base_model=lowerCAmelCase__ )
    for src, dest in rename_keys:
        rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
    read_in_q_k_v(lowerCAmelCase__ , lowerCAmelCase__ , base_model=lowerCAmelCase__ )
    model.load_state_dict(lowerCAmelCase__ )
    model.eval()
    __a : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    __a : str = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
    __a : Any = ViTImageProcessor(
        size=config.image_size , image_mean=lowerCAmelCase__ , image_std=lowerCAmelCase__ )
    __a : Tuple = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' )
    # forward pass
    torch.manual_seed(2 )
    __a : str = model(**lowerCAmelCase__ )
    __a : List[Any] = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        __a : Dict = torch.tensor([[-1.09_15, -1.48_76, -1.18_09]] )
    elif "b16" in checkpoint_url:
        __a : List[str] = torch.tensor([[14.28_89, -18.90_45, 11.72_81]] )
    elif "l16" in checkpoint_url:
        __a : str = torch.tensor([[41.50_28, -22.86_81, 45.64_75]] )
    elif "b4" in checkpoint_url:
        __a : Union[str, Any] = torch.tensor([[-4.38_68, 5.29_32, -0.41_37]] )
    else:
        __a : Dict = torch.tensor([[-0.17_92, -0.64_65, 2.42_63]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , lowerCAmelCase__ , atol=1e-4 )
    print(f"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(lowerCAmelCase__ )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(lowerCAmelCase__ )


if __name__ == "__main__":
    lowercase__ =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
        type=str,
        help='URL of the checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    lowercase__ =parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
216
0
import math
from typing import Any, Callable, List, Optional, Tuple, Union

import numpy as np
import torch

from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor

if is_onnx_available():
    from ..onnx_utils import OnnxRuntimeModel

from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder

lowerCAmelCase__ : List[str] =logging.get_logger(__name__)  # pylint: disable=invalid-name

lowerCAmelCase__ : Optional[Any] =2_56


class __lowercase (__SCREAMING_SNAKE_CASE ):
    """simple docstring"""

    _UpperCAmelCase = ["""melgan"""]

    def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ):
        """simple docstring"""
        super().__init__()
        # From MELGAN
        SCREAMING_SNAKE_CASE_ : Tuple = math.log(1E-5 )  # Matches MelGAN training.
        SCREAMING_SNAKE_CASE_ : str = 4.0  # Largest value for most examples
        SCREAMING_SNAKE_CASE_ : List[Any] = 1_2_8
        self.register_modules(
            notes_encoder=lowerCAmelCase__ , continuous_encoder=lowerCAmelCase__ , decoder=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , melgan=lowerCAmelCase__ , )

    def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__=(-1.0, 1.0) , lowerCAmelCase__=False ):
        """simple docstring"""
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = output_range
        if clip:
            SCREAMING_SNAKE_CASE_ : str = torch.clip(lowerCAmelCase__ , self.min_value , self.max_value )
        # Scale to [0, 1].
        SCREAMING_SNAKE_CASE_ : Dict = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__=(-1.0, 1.0) , lowerCAmelCase__=False ):
        """simple docstring"""
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = input_range
        SCREAMING_SNAKE_CASE_ : List[str] = torch.clip(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) if clip else outputs
        # Scale to [0, 1].
        SCREAMING_SNAKE_CASE_ : List[Any] = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
        """simple docstring"""
        SCREAMING_SNAKE_CASE_ : Tuple = input_tokens > 0
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.notes_encoder(
            encoder_input_tokens=lowerCAmelCase__ , encoder_inputs_mask=lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.continuous_encoder(
            encoder_inputs=lowerCAmelCase__ , encoder_inputs_mask=lowerCAmelCase__ )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
        """simple docstring"""
        SCREAMING_SNAKE_CASE_ : Any = noise_time
        if not torch.is_tensor(lowerCAmelCase__ ):
            SCREAMING_SNAKE_CASE_ : Any = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(lowerCAmelCase__ ) and len(timesteps.shape ) == 0:
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = timesteps[None].to(input_tokens.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        SCREAMING_SNAKE_CASE_ : List[Any] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
        SCREAMING_SNAKE_CASE_ : Optional[int] = self.decoder(
            encodings_and_masks=lowerCAmelCase__ , decoder_input_tokens=lowerCAmelCase__ , decoder_noise_time=lowerCAmelCase__ )
        return logits

    @torch.no_grad()
    def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = 1_0_0 , lowerCAmelCase__ = True , lowerCAmelCase__ = "numpy" , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , ):
        """simple docstring"""
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(lowerCAmelCase__ )}.''' )
        SCREAMING_SNAKE_CASE_ : Dict = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
        SCREAMING_SNAKE_CASE_ : Tuple = np.zeros([1, 0, self.n_dims] , np.floataa )
        SCREAMING_SNAKE_CASE_ : int = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowerCAmelCase__ , device=self.device )
        for i, encoder_input_tokens in enumerate(lowerCAmelCase__ ):
            if i == 0:
                SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device , dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowerCAmelCase__ , device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                SCREAMING_SNAKE_CASE_ : Dict = ones
            SCREAMING_SNAKE_CASE_ : int = self.scale_features(
                lowerCAmelCase__ , output_range=[-1.0, 1.0] , clip=lowerCAmelCase__ )
            SCREAMING_SNAKE_CASE_ : Optional[int] = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) ,
                continuous_inputs=lowerCAmelCase__ ,
                continuous_mask=lowerCAmelCase__ , )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            SCREAMING_SNAKE_CASE_ : Optional[Any] = randn_tensor(
                shape=encoder_continuous_inputs.shape ,
                generator=lowerCAmelCase__ ,
                device=self.device ,
                dtype=self.decoder.dtype , )
            # set step values
            self.scheduler.set_timesteps(lowerCAmelCase__ )
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
                SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.decode(
                    encodings_and_masks=lowerCAmelCase__ ,
                    input_tokens=lowerCAmelCase__ ,
                    noise_time=t / self.scheduler.config.num_train_timesteps , )
                # Compute previous output: x_t -> x_t-1
                SCREAMING_SNAKE_CASE_ : Any = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ).prev_sample
            SCREAMING_SNAKE_CASE_ : List[Any] = self.scale_to_features(lowerCAmelCase__ , input_range=[-1.0, 1.0] )
            SCREAMING_SNAKE_CASE_ : Optional[int] = mel[:1]
            SCREAMING_SNAKE_CASE_ : str = mel.cpu().float().numpy()
            SCREAMING_SNAKE_CASE_ : List[str] = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(lowerCAmelCase__ , lowerCAmelCase__ )
            logger.info('Generated segment' , lowerCAmelCase__ )
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.' )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.' )
        if output_type == "numpy":
            SCREAMING_SNAKE_CASE_ : int = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
        else:
            SCREAMING_SNAKE_CASE_ : Tuple = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=lowerCAmelCase__ )
162
from typing import Any


def a__ ( A__, A__, A__, A__, A__, ):
    _validation(
        A__, A__, A__, A__, A__, )
    # Creates data structures and fill initial step
    SCREAMING_SNAKE_CASE_ : dict = {}
    SCREAMING_SNAKE_CASE_ : dict = {}
    for state in states_space:
        SCREAMING_SNAKE_CASE_ : int = observations_space[0]
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        SCREAMING_SNAKE_CASE_ : str = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(A__ ) ):
        SCREAMING_SNAKE_CASE_ : List[str] = observations_space[o]
        SCREAMING_SNAKE_CASE_ : str = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''
            SCREAMING_SNAKE_CASE_ : str = -1
            for k_state in states_space:
                SCREAMING_SNAKE_CASE_ : List[str] = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    SCREAMING_SNAKE_CASE_ : Tuple = probability
                    SCREAMING_SNAKE_CASE_ : Optional[int] = k_state

            # Update probabilities and pointers dicts
            SCREAMING_SNAKE_CASE_ : List[Any] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            SCREAMING_SNAKE_CASE_ : Tuple = arg_max

    # The final observation
    SCREAMING_SNAKE_CASE_ : Optional[int] = observations_space[len(A__ ) - 1]

    # argmax for given final observation
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = ''
    SCREAMING_SNAKE_CASE_ : List[str] = -1
    for k_state in states_space:
        SCREAMING_SNAKE_CASE_ : int = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            SCREAMING_SNAKE_CASE_ : List[Any] = probability
            SCREAMING_SNAKE_CASE_ : Tuple = k_state
    SCREAMING_SNAKE_CASE_ : Optional[Any] = arg_max

    # Process pointers backwards
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = last_state
    SCREAMING_SNAKE_CASE_ : List[str] = []
    for o in range(len(A__ ) - 1, -1, -1 ):
        result.append(A__ )
        SCREAMING_SNAKE_CASE_ : Tuple = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def a__ ( A__, A__, A__, A__, A__, ):
    _validate_not_empty(
        A__, A__, A__, A__, A__, )
    _validate_lists(A__, A__ )
    _validate_dicts(
        A__, A__, A__ )


def a__ ( A__, A__, A__, A__, A__, ):
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('There\'s an empty parameter' )


def a__ ( A__, A__ ):
    _validate_list(A__, 'observations_space' )
    _validate_list(A__, 'states_space' )


def a__ ( A__, A__ ):
    if not isinstance(_object, A__ ):
        SCREAMING_SNAKE_CASE_ : List[str] = F'''{var_name} must be a list'''
        raise ValueError(A__ )
    else:
        for x in _object:
            if not isinstance(A__, A__ ):
                SCREAMING_SNAKE_CASE_ : Dict = F'''{var_name} must be a list of strings'''
                raise ValueError(A__ )


def a__ ( A__, A__, A__, ):
    _validate_dict(A__, 'initial_probabilities', A__ )
    _validate_nested_dict(A__, 'transition_probabilities' )
    _validate_nested_dict(A__, 'emission_probabilities' )


def a__ ( A__, A__ ):
    _validate_dict(_object, A__, A__ )
    for x in _object.values():
        _validate_dict(A__, A__, A__, A__ )


def a__ ( A__, A__, A__, A__ = False ):
    if not isinstance(_object, A__ ):
        SCREAMING_SNAKE_CASE_ : Dict = F'''{var_name} must be a dict'''
        raise ValueError(A__ )
    if not all(isinstance(A__, A__ ) for x in _object ):
        SCREAMING_SNAKE_CASE_ : Optional[int] = F'''{var_name} all keys must be strings'''
        raise ValueError(A__ )
    if not all(isinstance(A__, A__ ) for x in _object.values() ):
        SCREAMING_SNAKE_CASE_ : Optional[Any] = 'nested dictionary ' if nested else ''
        SCREAMING_SNAKE_CASE_ : Any = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
        raise ValueError(A__ )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
162
1
'''simple docstring'''


class UpperCAmelCase :
    def __init__( self : str , __snake_case : List[str] ) -> Union[str, Any]:
        _lowerCAmelCase = val
        _lowerCAmelCase = None
        _lowerCAmelCase = None

    def lowercase__ ( self : Optional[int] , __snake_case : str ) -> str:
        if self.val:
            if val < self.val:
                if self.left is None:
                    _lowerCAmelCase = Node(__snake_case )
                else:
                    self.left.insert(__snake_case )
            elif val > self.val:
                if self.right is None:
                    _lowerCAmelCase = Node(__snake_case )
                else:
                    self.right.insert(__snake_case )
        else:
            _lowerCAmelCase = val


def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ):
    """simple docstring"""
    if root:
        inorder(root.left , lowerCAmelCase )
        res.append(root.val )
        inorder(root.right , lowerCAmelCase )


def UpperCamelCase__ ( lowerCAmelCase ):
    """simple docstring"""
    if len(lowerCAmelCase ) == 0:
        return arr
    _lowerCAmelCase = Node(arr[0] )
    for i in range(1 , len(lowerCAmelCase ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    _lowerCAmelCase = []
    inorder(lowerCAmelCase , lowerCAmelCase )
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
70
'''simple docstring'''

from __future__ import annotations

import math


def UpperCamelCase__ ( lowerCAmelCase ):
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


A__ : Optional[Any] =[num for num in range(3, 10_00_01, 2) if not is_prime(num)]


def UpperCamelCase__ ( lowerCAmelCase ):
    """simple docstring"""
    if not isinstance(lowerCAmelCase , lowerCAmelCase ):
        raise ValueError("""n must be an integer""" )
    if n <= 0:
        raise ValueError("""n must be >= 0""" )

    _lowerCAmelCase = []
    for num in range(len(lowerCAmelCase ) ):
        _lowerCAmelCase = 0
        while 2 * i * i <= odd_composites[num]:
            _lowerCAmelCase = odd_composites[num] - 2 * i * i
            if is_prime(lowerCAmelCase ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
            if len(lowerCAmelCase ) == n:
                return list_nums
    return []


def UpperCamelCase__ ( ):
    """simple docstring"""
    return compute_nums(1 )[0]


if __name__ == "__main__":
    print(F"""{solution() = }""")
70
1
# This is the module that test_patching.py uses to test patch_submodule()

import os  # noqa: this is just for tests
import os as renamed_os  # noqa: this is just for tests
from os import path  # noqa: this is just for tests
from os import path as renamed_path  # noqa: this is just for tests
from os.path import join  # noqa: this is just for tests
from os.path import join as renamed_join  # noqa: this is just for tests

_UpperCAmelCase : List[str] = open  # noqa: we just need to have a builtin inside this module to test it properly
158
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging

_UpperCAmelCase : List[Any] = logging.get_logger(__name__)

_UpperCAmelCase : str = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
_UpperCAmelCase : List[str] = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

_UpperCAmelCase : Optional[Any] = {
    "allenai/led-base-16384": 16384,
}


@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def UpperCAmelCase__ ( ):
    lowercase :int = (
        list(range(ord("!" ), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) )
    )
    lowercase :Dict = bs[:]
    lowercase :List[Any] = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(lowerCamelCase )
            cs.append(2**8 + n )
            n += 1
    lowercase :List[str] = [chr(lowerCamelCase ) for n in cs]
    return dict(zip(lowerCamelCase, lowerCamelCase ) )


def UpperCAmelCase__ ( lowerCamelCase ):
    lowercase :List[Any] = set()
    lowercase :Any = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        lowercase :List[str] = char
    return pairs


class __lowerCAmelCase ( lowerCAmelCase):
    _a = VOCAB_FILES_NAMES
    _a = PRETRAINED_VOCAB_FILES_MAP
    _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _a = ['''input_ids''', '''attention_mask''']

    def __init__( self: Optional[int] , _lowerCAmelCase: Tuple , _lowerCAmelCase: Tuple , _lowerCAmelCase: Union[str, Any]="replace" , _lowerCAmelCase: int="<s>" , _lowerCAmelCase: int="</s>" , _lowerCAmelCase: int="</s>" , _lowerCAmelCase: Optional[int]="<s>" , _lowerCAmelCase: Optional[int]="<unk>" , _lowerCAmelCase: Any="<pad>" , _lowerCAmelCase: Optional[Any]="<mask>" , _lowerCAmelCase: Union[str, Any]=False , **_lowerCAmelCase: Dict , ):
        lowercase :Union[str, Any] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else bos_token
        lowercase :List[str] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else eos_token
        lowercase :Any = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else sep_token
        lowercase :Tuple = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else cls_token
        lowercase :List[Any] = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else unk_token
        lowercase :str = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        lowercase :Tuple = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
        super().__init__(
            errors=_lowerCAmelCase ,
            bos_token=_lowerCAmelCase ,
            eos_token=_lowerCAmelCase ,
            unk_token=_lowerCAmelCase ,
            sep_token=_lowerCAmelCase ,
            cls_token=_lowerCAmelCase ,
            pad_token=_lowerCAmelCase ,
            mask_token=_lowerCAmelCase ,
            add_prefix_space=_lowerCAmelCase ,
            **_lowerCAmelCase , )
        with open(_lowerCAmelCase , encoding="utf-8" ) as vocab_handle:
            lowercase :List[str] = json.load(_lowerCAmelCase )
        lowercase :Union[str, Any] = {v: k for k, v in self.encoder.items()}
        lowercase :Dict = errors  # how to handle errors in decoding
        lowercase :Any = bytes_to_unicode()
        lowercase :str = {v: k for k, v in self.byte_encoder.items()}
        with open(_lowerCAmelCase , encoding="utf-8" ) as merges_handle:
            lowercase :List[Any] = merges_handle.read().split("\n" )[1:-1]
        lowercase :Tuple = [tuple(merge.split() ) for merge in bpe_merges]
        lowercase :Optional[int] = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
        lowercase :Tuple = {}
        lowercase :List[Any] = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        lowercase :Optional[int] = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def SCREAMING_SNAKE_CASE ( self: int ):
        return len(self.encoder )

    def SCREAMING_SNAKE_CASE ( self: Dict ):
        return dict(self.encoder , **self.added_tokens_encoder )

    def SCREAMING_SNAKE_CASE ( self: int , _lowerCAmelCase: Optional[int] ):
        if token in self.cache:
            return self.cache[token]
        lowercase :Tuple = tuple(_lowerCAmelCase )
        lowercase :List[str] = get_pairs(_lowerCAmelCase )
        if not pairs:
            return token
        while True:
            lowercase :List[str] = min(_lowerCAmelCase , key=lambda _lowerCAmelCase : self.bpe_ranks.get(_lowerCAmelCase , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            lowercase , lowercase :List[Any] = bigram
            lowercase :str = []
            lowercase :Tuple = 0
            while i < len(_lowerCAmelCase ):
                try:
                    lowercase :List[str] = word.index(_lowerCAmelCase , _lowerCAmelCase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    lowercase :Optional[Any] = j
                if word[i] == first and i < len(_lowerCAmelCase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            lowercase :Dict = tuple(_lowerCAmelCase )
            lowercase :Optional[Any] = new_word
            if len(_lowerCAmelCase ) == 1:
                break
            else:
                lowercase :List[str] = get_pairs(_lowerCAmelCase )
        lowercase :str = " ".join(_lowerCAmelCase )
        lowercase :Optional[Any] = word
        return word

    def SCREAMING_SNAKE_CASE ( self: Any , _lowerCAmelCase: Optional[int] ):
        lowercase :str = []
        for token in re.findall(self.pat , _lowerCAmelCase ):
            lowercase :str = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCAmelCase ).split(" " ) )
        return bpe_tokens

    def SCREAMING_SNAKE_CASE ( self: Optional[int] , _lowerCAmelCase: str ):
        return self.encoder.get(_lowerCAmelCase , self.encoder.get(self.unk_token ) )

    def SCREAMING_SNAKE_CASE ( self: Optional[int] , _lowerCAmelCase: Tuple ):
        return self.decoder.get(_lowerCAmelCase )

    def SCREAMING_SNAKE_CASE ( self: Tuple , _lowerCAmelCase: 
List[str] ): lowercase :Optional[int] = "".join(_lowerCAmelCase ) lowercase :List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def SCREAMING_SNAKE_CASE ( self: List[Any] , _lowerCAmelCase: str , _lowerCAmelCase: Optional[str] = None ): if not os.path.isdir(_lowerCAmelCase ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return lowercase :List[str] = os.path.join( _lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) lowercase :Optional[int] = os.path.join( _lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCAmelCase , ensure_ascii=_lowerCAmelCase ) + "\n" ) lowercase :Tuple = 0 with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowerCAmelCase : kv[1] ): if index != token_index: logger.warning( F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) lowercase :Tuple = token_index writer.write(" ".join(_lowerCAmelCase ) + "\n" ) index += 1 return vocab_file, merge_file def SCREAMING_SNAKE_CASE ( self: int , _lowerCAmelCase: List[int] , _lowerCAmelCase: Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase :Tuple = [self.cls_token_id] lowercase :int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE ( self: List[Any] , _lowerCAmelCase: List[int] , _lowerCAmelCase: Optional[List[int]] = None , _lowerCAmelCase: bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(_lowerCAmelCase )) + [1] return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1] def SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _lowerCAmelCase: List[int] , _lowerCAmelCase: Optional[List[int]] = None ): lowercase :List[str] = [self.sep_token_id] lowercase :Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE ( self: Tuple , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: int=False , **_lowerCAmelCase: Dict ): lowercase :Tuple = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_lowerCAmelCase ) > 0 and not text[0].isspace()): lowercase :List[Any] = " " + text return (text, kwargs) def SCREAMING_SNAKE_CASE ( self: List[Any] , _lowerCAmelCase: Union[Dict[str, EncodedInput], BatchEncoding] , _lowerCAmelCase: Optional[int] = None , _lowerCAmelCase: PaddingStrategy = PaddingStrategy.DO_NOT_PAD , _lowerCAmelCase: Optional[int] = None , _lowerCAmelCase: Optional[bool] = None , ): lowercase :Tuple = super()._pad( encoded_inputs=_lowerCAmelCase , max_length=_lowerCAmelCase , padding_strategy=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ) # Load from model defaults if return_attention_mask is None: lowercase :Union[str, Any] = "attention_mask" in 
self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowercase :Any = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. lowercase :Optional[int] = len(encoded_inputs["global_attention_mask"] ) != len(_lowerCAmelCase ) if needs_to_be_padded: lowercase :Optional[int] = len(_lowerCAmelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowercase :List[Any] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": lowercase :int = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
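A small usage sketch of the `_pad` override above, assuming the published allenai/led-base-16384 checkpoint; it shows that `global_attention_mask` is padded with -1 (not 0, which would mean local attention):

from transformers import LEDTokenizer

tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
enc = tokenizer("Summarize this document.")
# Global attention on the first token, local attention everywhere else.
enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
padded = tokenizer.pad(enc, padding="max_length", max_length=16)
print(padded["global_attention_mask"])  # trailing positions are -1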
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """Yield the Fibonacci numbers 1, 2, 3, 5, 8, ..."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_000) -> int:
    """Return the index of the first Fibonacci number to contain n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
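A quick sanity check of the function above (this is Project Euler problem 25; the published answer for 1,000 digits is 4782):

assert solution(3) == 12  # F(12) = 144 is the first Fibonacci number with 3 digits
print(solution())  # 4782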
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _A = logging.get_logger(__name__) _A = '▁' _A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} _A = { 'vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model', }, 'monolingual_vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt', }, } _A = {'vinai/bartpho-syllable': 1024} class UpperCAmelCase__ ( A_ ): """simple docstring""" UpperCAmelCase__ : Any = VOCAB_FILES_NAMES UpperCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ : str = ["input_ids", "attention_mask"] def __init__( self , A_ , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_ = None , **A_ , ) -> None: # Mask token behave like a normal word, i.e. include the space before it __UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token __UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , ) __UpperCamelCase =vocab_file __UpperCamelCase =monolingual_vocab_file __UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A_ ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility __UpperCamelCase ={} __UpperCamelCase =0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(A_ ) not in self.fairseq_tokens_to_ids: __UpperCamelCase =cnt cnt += 1 with open(A_ , 'r' , encoding='utf-8' ) as f: for line in f.readlines(): __UpperCamelCase =line.strip().split()[0] __UpperCamelCase =len(self.fairseq_tokens_to_ids ) if str(A_ ) not in self.fairseq_tokens_to_ids: __UpperCamelCase =len(self.fairseq_tokens_to_ids ) __UpperCamelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Any: __UpperCamelCase =self.__dict__.copy() __UpperCamelCase =None __UpperCamelCase =self.sp_model.serialized_model_proto() return state def __setstate__( self , A_ ) -> List[str]: __UpperCamelCase =d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __UpperCamelCase ={} __UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _a ( self , A_ , A_ = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __UpperCamelCase =[self.cls_token_id] __UpperCamelCase =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ ) if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1] def _a ( self , A_ , A_ = None ) -> List[int]: __UpperCamelCase =[self.sep_token_id] __UpperCamelCase =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + 
sep ) * [0] @property def _a ( self ) -> Any: return len(self.fairseq_ids_to_tokens ) def _a ( self ) -> Union[str, Any]: __UpperCamelCase ={self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _a ( self , A_ ) -> List[str]: return self.sp_model.encode(A_ , out_type=A_ ) def _a ( self , A_ ) -> str: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def _a ( self , A_ ) -> int: return self.fairseq_ids_to_tokens[index] def _a ( self , A_ ) -> List[Any]: __UpperCamelCase =''.join(A_ ).replace(A_ , ' ' ).strip() return out_string def _a ( self , A_ , A_ = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __UpperCamelCase =os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) __UpperCamelCase =os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ , 'wb' ) as fi: __UpperCamelCase =self.sp_model.serialized_model_proto() fi.write(A_ ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( A_ ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , A_ ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(A_ , 'w' , encoding='utf-8' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'{str(A_ )} \n' ) return out_vocab_file, out_monolingual_vocab_file
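A hypothetical smoke test for the tokenizer above, loading it through AutoTokenizer with the checkpoint from its pretrained map (sentencepiece must be installed); the sample sentence is illustrative:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
line = "Chúng tôi là những nghiên cứu viên."
enc = tokenizer(line)
print(enc["input_ids"])
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))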
from argparse import ArgumentParser, Namespace from typing import Any, List, Optional from ..pipelines import Pipeline, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand try: from fastapi import Body, FastAPI, HTTPException from fastapi.routing import APIRoute from pydantic import BaseModel from starlette.responses import JSONResponse from uvicorn import run snake_case : int = True except (ImportError, AttributeError): snake_case : List[Any] = object def __lowerCamelCase ( *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : str ): """simple docstring""" pass snake_case : Any = False snake_case : int = logging.get_logger('''transformers-cli/serving''') def __lowerCamelCase ( UpperCAmelCase_ : Namespace ): """simple docstring""" a :Dict = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) return ServeCommand(UpperCAmelCase_ , args.host , args.port , args.workers ) class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 42 class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 42 class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 42 class _snake_case ( _snake_case ): @staticmethod def SCREAMING_SNAKE_CASE__ ( _lowerCamelCase ): a :List[str] = parser.add_parser( '''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' ) serve_parser.add_argument( '''--task''' , type=_lowerCamelCase , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , ) serve_parser.add_argument('''--host''' , type=_lowerCamelCase , default='''localhost''' , help='''Interface the server will listen on.''' ) serve_parser.add_argument('''--port''' , type=_lowerCamelCase , default=8888 , help='''Port the serving will listen to.''' ) serve_parser.add_argument('''--workers''' , type=_lowerCamelCase , default=1 , help='''Number of http workers''' ) serve_parser.add_argument('''--model''' , type=_lowerCamelCase , help='''Model\'s name or path to stored model.''' ) serve_parser.add_argument('''--config''' , type=_lowerCamelCase , help='''Model\'s config name or path to stored model.''' ) serve_parser.add_argument('''--tokenizer''' , type=_lowerCamelCase , help='''Tokenizer name to use.''' ) serve_parser.add_argument( '''--device''' , type=_lowerCamelCase , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , ) serve_parser.set_defaults(func=_lowerCamelCase ) def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): a :Optional[int] = pipeline a :Optional[Any] = host a :Optional[Any] = port a :List[Any] = workers if not _serve_dependencies_installed: raise RuntimeError( '''Using serve command requires FastAPI and uvicorn. 
''' '''Please install transformers with [serving]: pip install "transformers[serving]".''' '''Or install FastAPI and uvicorn separately.''' ) else: logger.info(F'''Serving model over {host}:{port}''' ) a :Any = FastAPI( routes=[ APIRoute( '''/''' , self.model_info , response_model=_lowerCamelCase , response_class=_lowerCamelCase , methods=['''GET'''] , ), APIRoute( '''/tokenize''' , self.tokenize , response_model=_lowerCamelCase , response_class=_lowerCamelCase , methods=['''POST'''] , ), APIRoute( '''/detokenize''' , self.detokenize , response_model=_lowerCamelCase , response_class=_lowerCamelCase , methods=['''POST'''] , ), APIRoute( '''/forward''' , self.forward , response_model=_lowerCamelCase , response_class=_lowerCamelCase , methods=['''POST'''] , ), ] , timeout=600 , ) def SCREAMING_SNAKE_CASE__ ( self ): run(self._app , host=self.host , port=self.port , workers=self.workers ) def SCREAMING_SNAKE_CASE__ ( self ): return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase = Body(_lowerCamelCase , embed=_lowerCamelCase ) , _lowerCamelCase = Body(_lowerCamelCase , embed=_lowerCamelCase ) ): try: a :Any = self._pipeline.tokenizer.tokenize(_lowerCamelCase ) if return_ids: a :List[Any] = self._pipeline.tokenizer.convert_tokens_to_ids(_lowerCamelCase ) return ServeTokenizeResult(tokens=_lowerCamelCase , tokens_ids=_lowerCamelCase ) else: return ServeTokenizeResult(tokens=_lowerCamelCase ) except Exception as e: raise HTTPException(status_code=500 , detail={'''model''': '''''', '''error''': str(_lowerCamelCase )} ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase = Body(_lowerCamelCase , embed=_lowerCamelCase ) , _lowerCamelCase = Body(_lowerCamelCase , embed=_lowerCamelCase ) , _lowerCamelCase = Body(_lowerCamelCase , embed=_lowerCamelCase ) , ): try: a :str = self._pipeline.tokenizer.decode(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) return ServeDeTokenizeResult(model='''''' , text=_lowerCamelCase ) except Exception as e: raise HTTPException(status_code=500 , detail={'''model''': '''''', '''error''': str(_lowerCamelCase )} ) async def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase=Body(_lowerCamelCase , embed=_lowerCamelCase ) ): # Check we don't have empty string if len(_lowerCamelCase ) == 0: return ServeForwardResult(output=[] , attention=[] ) try: # Forward through the model a :Optional[int] = self._pipeline(_lowerCamelCase ) return ServeForwardResult(output=_lowerCamelCase ) except Exception as e: raise HTTPException(500 , {'''error''': str(_lowerCamelCase )} )
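A hypothetical client session against the routes registered above, assuming the server was started with `transformers-cli serve --task text-classification` on the default host and port; the JSON field names mirror the `Body(..., embed=True)` parameters:

import requests

# POST /tokenize; embed=True means each parameter is a top-level JSON field.
resp = requests.post(
    "http://localhost:8888/tokenize",
    json={"text_input": "Hello, world!", "return_ids": True},
)
print(resp.json())  # expected shape: {"tokens": [...], "tokens_ids": [...]}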
import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) snake_case : Any = pytest.mark.integration @pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] ) def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] ): """simple docstring""" inspect_dataset(UpperCAmelCase_ , UpperCAmelCase_ ) a :List[Any] = path + '''.py''' assert script_name in os.listdir(UpperCAmelCase_ ) assert "__pycache__" not in os.listdir(UpperCAmelCase_ ) @pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' ) @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' ) @pytest.mark.parametrize('''path''' , ['''accuracy'''] ) def __lowerCamelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any ): """simple docstring""" inspect_metric(UpperCAmelCase_ , UpperCAmelCase_ ) a :Dict = path + '''.py''' assert script_name in os.listdir(UpperCAmelCase_ ) assert "__pycache__" not in os.listdir(UpperCAmelCase_ ) @pytest.mark.parametrize( '''path, config_name, expected_splits''' , [ ('''squad''', '''plain_text''', ['''train''', '''validation''']), ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']), ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']), ] , ) def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str ): """simple docstring""" a :List[str] = get_dataset_config_info(UpperCAmelCase_ , config_name=UpperCAmelCase_ ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( '''path, config_name, expected_exception''' , [ ('''paws''', None, ValueError), ] , ) def __lowerCamelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ): """simple docstring""" with pytest.raises(UpperCAmelCase_ ): get_dataset_config_info(UpperCAmelCase_ , config_name=UpperCAmelCase_ ) @pytest.mark.parametrize( '''path, expected''' , [ ('''squad''', '''plain_text'''), ('''acronym_identification''', '''default'''), ('''lhoestq/squad''', '''plain_text'''), ('''lhoestq/test''', '''default'''), ('''lhoestq/demo1''', '''lhoestq--demo1'''), ('''dalle-mini/wit''', '''dalle-mini--wit'''), ] , ) def __lowerCamelCase ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int ): """simple docstring""" a :List[str] = get_dataset_config_names(UpperCAmelCase_ ) assert expected in config_names @pytest.mark.parametrize( '''path, expected_configs, expected_splits_in_first_config''' , [ ('''squad''', ['''plain_text'''], ['''train''', '''validation''']), ('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']), ('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']), ] , ) def __lowerCamelCase ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] ): """simple docstring""" a :Optional[int] = get_dataset_infos(UpperCAmelCase_ ) assert list(infos.keys() ) == expected_configs a :Union[str, Any] = expected_configs[0] assert expected_config in infos a :List[Any] = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( '''path, expected_config, expected_splits''' , [ ('''squad''', '''plain_text''', ['''train''', '''validation''']), ('''dalle-mini/wit''', '''dalle-mini--wit''', 
['''train''']), ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']), ] , ) def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] ): """simple docstring""" a :Union[str, Any] = get_dataset_infos(UpperCAmelCase_ ) assert expected_config in infos a :int = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( '''path, config_name, expected_exception''' , [ ('''paws''', None, ValueError), ] , ) def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str ): """simple docstring""" with pytest.raises(UpperCAmelCase_ ): get_dataset_split_names(UpperCAmelCase_ , config_name=UpperCAmelCase_ )
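The same inspection helpers these tests exercise can be used directly; a small example grounded in the expected values from the parametrizations above (network access required):

from datasets import get_dataset_config_names, get_dataset_split_names

print(get_dataset_config_names("squad"))  # ['plain_text']
print(get_dataset_split_names("squad", config_name="plain_text"))  # ['train', 'validation']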
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): __lowerCAmelCase : Any = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __lowerCAmelCase : Union[str, Any] = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int: '''simple docstring''' UpperCAmelCase : Any = TextaTextGenerationPipeline(model=snake_case__ , tokenizer=snake_case__ ) return generator, ["Something to write", "Something else"] def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: '''simple docstring''' UpperCAmelCase : Dict = generator("""Something there""" ) self.assertEqual(snake_case__ , [{"""generated_text""": ANY(snake_case__ )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) UpperCAmelCase : Optional[int] = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=snake_case__ ) self.assertEqual( snake_case__ , [ [{"""generated_text""": ANY(snake_case__ )}, {"""generated_text""": ANY(snake_case__ )}], [{"""generated_text""": ANY(snake_case__ )}, {"""generated_text""": ANY(snake_case__ )}], ] , ) UpperCAmelCase : List[str] = generator( ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case__ ) self.assertEqual( snake_case__ , [ [{"""generated_text""": ANY(snake_case__ )}, {"""generated_text""": ANY(snake_case__ )}], [{"""generated_text""": ANY(snake_case__ )}, {"""generated_text""": ANY(snake_case__ )}], ] , ) with self.assertRaises(snake_case__ ): generator(4 ) @require_torch def SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : Dict = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" ) # do_sample=False necessary for reproducibility UpperCAmelCase : List[Any] = generator("""Something there""" , do_sample=snake_case__ ) self.assertEqual(snake_case__ , [{"""generated_text""": """"""}] ) UpperCAmelCase : List[str] = 3 UpperCAmelCase : int = generator( """Something there""" , num_return_sequences=snake_case__ , num_beams=snake_case__ , ) UpperCAmelCase : Dict = [ {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"}, {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"}, {"generated_text": ""}, ] self.assertEqual(snake_case__ , snake_case__ ) UpperCAmelCase : Union[str, Any] = generator("""This is a test""" , do_sample=snake_case__ , num_return_sequences=2 , return_tensors=snake_case__ ) self.assertEqual( snake_case__ , [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] , ) UpperCAmelCase : List[str] = generator.model.config.eos_token_id UpperCAmelCase : Dict = "<pad>" UpperCAmelCase : Optional[int] = generator( ["""This is a test""", """This is a second test"""] , do_sample=snake_case__ , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case__ , ) self.assertEqual( snake_case__ , [ [ 
{"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] , ) @require_tf def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : str = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" ) # do_sample=False necessary for reproducibility UpperCAmelCase : str = generator("""Something there""" , do_sample=snake_case__ ) self.assertEqual(snake_case__ , [{"""generated_text""": """"""}] )
"""simple docstring""" import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class lowercase__ ( snake_case__ ): _UpperCAmelCase :BigBirdConfig _UpperCAmelCase :jnp.dtype = jnp.floataa _UpperCAmelCase :bool = True def UpperCAmelCase__ ( self : Dict ): super().setup() lowerCamelCase_ : List[str] =nn.Dense(5 , dtype=self.dtype ) def __call__( self : Dict , *snake_case__ : Optional[int] , **snake_case__ : Any ): lowerCamelCase_ : int =super().__call__(*snake_case__ , **snake_case__ ) lowerCamelCase_ : Tuple =self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class lowercase__ ( snake_case__ ): _UpperCAmelCase :List[str] = FlaxBigBirdForNaturalQuestionsModule def _snake_case ( lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> List[str]: def cross_entropy(lowerCamelCase__ : int , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int=None ): lowerCamelCase_ : List[str] =logits.shape[-1] lowerCamelCase_ : List[str] =(labels[..., None] == jnp.arange(lowerCamelCase__ )[None]).astype("f4" ) lowerCamelCase_ : str =jax.nn.log_softmax(lowerCamelCase__ , axis=-1 ) lowerCamelCase_ : Tuple =-jnp.sum(labels * logits , axis=-1 ) if reduction is not None: lowerCamelCase_ : str =reduction(lowerCamelCase__ ) return loss lowerCamelCase_ : int =partial(lowerCamelCase__ , reduction=jnp.mean ) lowerCamelCase_ : int =cross_entropy(lowerCamelCase__ , lowerCamelCase__ ) lowerCamelCase_ : Any =cross_entropy(lowerCamelCase__ , lowerCamelCase__ ) lowerCamelCase_ : List[str] =cross_entropy(lowerCamelCase__ , lowerCamelCase__ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class lowercase__ : _UpperCAmelCase :str = "google/bigbird-roberta-base" _UpperCAmelCase :int = 3000 _UpperCAmelCase :int = 10500 _UpperCAmelCase :int = 128 _UpperCAmelCase :int = 3 _UpperCAmelCase :int = 1 _UpperCAmelCase :int = 5 # tx_args _UpperCAmelCase :float = 3e-5 _UpperCAmelCase :float = 0.0 _UpperCAmelCase :int = 20000 _UpperCAmelCase :float = 0.00_95 _UpperCAmelCase :str = "bigbird-roberta-natural-questions" _UpperCAmelCase :str = "training-expt" _UpperCAmelCase :str = "data/nq-training.jsonl" _UpperCAmelCase :str = "data/nq-validation.jsonl" def UpperCAmelCase__ ( self : Union[str, Any] ): os.makedirs(self.base_dir , exist_ok=snake_case__ ) lowerCamelCase_ : Tuple =os.path.join(self.base_dir , self.save_dir ) lowerCamelCase_ : Optional[Any] =self.batch_size_per_device * jax.device_count() @dataclass class lowercase__ : _UpperCAmelCase :int _UpperCAmelCase :int = 4096 # no dynamic padding on TPUs def __call__( self : List[str] , snake_case__ : List[str] ): lowerCamelCase_ : Optional[int] =self.collate_fn(snake_case__ ) lowerCamelCase_ : List[str] =jax.tree_util.tree_map(snake_case__ , snake_case__ ) return batch def UpperCAmelCase__ ( self : str , snake_case__ : Dict ): lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] =self.fetch_inputs(features["input_ids"] ) 
lowerCamelCase_ : Dict ={ "input_ids": jnp.array(snake_case__ , dtype=jnp.intaa ), "attention_mask": jnp.array(snake_case__ , dtype=jnp.intaa ), "start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa ), "end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa ), "pooled_labels": jnp.array(features["category"] , dtype=jnp.intaa ), } return batch def UpperCAmelCase__ ( self : List[Any] , snake_case__ : list ): lowerCamelCase_ : Any =[self._fetch_inputs(snake_case__ ) for ids in input_ids] return zip(*snake_case__ ) def UpperCAmelCase__ ( self : int , snake_case__ : list ): lowerCamelCase_ : List[Any] =[1 for _ in range(len(snake_case__ ) )] while len(snake_case__ ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def _snake_case ( lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]=None ) -> Optional[int]: if seed is not None: lowerCamelCase_ : Union[str, Any] =dataset.shuffle(seed=lowerCamelCase__ ) for i in range(len(lowerCamelCase__ ) // batch_size ): lowerCamelCase_ : Any =dataset[i * batch_size : (i + 1) * batch_size] yield dict(lowerCamelCase__ ) @partial(jax.pmap , axis_name="batch" ) def _snake_case ( lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Tuple ) -> int: def loss_fn(lowerCamelCase__ : Optional[int] ): lowerCamelCase_ : List[Any] =model_inputs.pop("start_labels" ) lowerCamelCase_ : Dict =model_inputs.pop("end_labels" ) lowerCamelCase_ : Any =model_inputs.pop("pooled_labels" ) lowerCamelCase_ : Tuple =state.apply_fn(**lowerCamelCase__ , params=lowerCamelCase__ , dropout_rng=lowerCamelCase__ , train=lowerCamelCase__ ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Any =outputs return state.loss_fn( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) lowerCamelCase_ , lowerCamelCase_ : Optional[Any] =jax.random.split(lowerCamelCase__ ) lowerCamelCase_ : Union[str, Any] =jax.value_and_grad(lowerCamelCase__ ) lowerCamelCase_ , lowerCamelCase_ : Tuple =grad_fn(state.params ) lowerCamelCase_ : List[Any] =jax.lax.pmean({"loss": loss} , axis_name="batch" ) lowerCamelCase_ : int =jax.lax.pmean(lowerCamelCase__ , "batch" ) lowerCamelCase_ : List[Any] =state.apply_gradients(grads=lowerCamelCase__ ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="batch" ) def _snake_case ( lowerCamelCase__ : List[str] , **lowerCamelCase__ : Union[str, Any] ) -> Dict: lowerCamelCase_ : Dict =model_inputs.pop("start_labels" ) lowerCamelCase_ : List[Any] =model_inputs.pop("end_labels" ) lowerCamelCase_ : Union[str, Any] =model_inputs.pop("pooled_labels" ) lowerCamelCase_ : Tuple =state.apply_fn(**lowerCamelCase__ , params=state.params , train=lowerCamelCase__ ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] =outputs lowerCamelCase_ : int =state.loss_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCamelCase_ : str =jax.lax.pmean({"loss": loss} , axis_name="batch" ) return metrics class lowercase__ ( train_state.TrainState ): _UpperCAmelCase :Callable = struct.field(pytree_node=snake_case__ ) @dataclass class lowercase__ : _UpperCAmelCase :Args _UpperCAmelCase :Callable _UpperCAmelCase :Callable _UpperCAmelCase :Callable _UpperCAmelCase :Callable _UpperCAmelCase :wandb _UpperCAmelCase :Callable = None def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : Optional[Any] , 
snake_case__ : Any , snake_case__ : List[str] , snake_case__ : str=None ): lowerCamelCase_ : int =model.params lowerCamelCase_ : Optional[Any] =TrainState.create( apply_fn=model.__call__ , params=snake_case__ , tx=snake_case__ , loss_fn=snake_case__ , ) if ckpt_dir is not None: lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Any =restore_checkpoint(snake_case__ , snake_case__ ) lowerCamelCase_ : Tuple ={ "lr": args.lr, "init_lr": args.init_lr, "warmup_steps": args.warmup_steps, "num_train_steps": num_train_steps, "weight_decay": args.weight_decay, } lowerCamelCase_ , lowerCamelCase_ : Tuple =build_tx(**snake_case__ ) lowerCamelCase_ : Union[str, Any] =train_state.TrainState( step=snake_case__ , apply_fn=model.__call__ , params=snake_case__ , tx=snake_case__ , opt_state=snake_case__ , ) lowerCamelCase_ : int =args lowerCamelCase_ : Union[str, Any] =data_collator lowerCamelCase_ : Dict =lr lowerCamelCase_ : Optional[Any] =params lowerCamelCase_ : Dict =jax_utils.replicate(snake_case__ ) return state def UpperCAmelCase__ ( self : Dict , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : List[str] ): lowerCamelCase_ : str =self.args lowerCamelCase_ : List[Any] =len(snake_case__ ) // args.batch_size lowerCamelCase_ : Optional[int] =jax.random.PRNGKey(0 ) lowerCamelCase_ : Dict =jax.random.split(snake_case__ , jax.device_count() ) for epoch in range(args.max_epochs ): lowerCamelCase_ : int =jnp.array(0 , dtype=jnp.floataa ) lowerCamelCase_ : List[Any] =get_batched_dataset(snake_case__ , args.batch_size , seed=snake_case__ ) lowerCamelCase_ : Dict =0 for batch in tqdm(snake_case__ , total=snake_case__ , desc=F"""Running EPOCH-{epoch}""" ): lowerCamelCase_ : str =self.data_collator(snake_case__ ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Any =self.train_step_fn(snake_case__ , snake_case__ , **snake_case__ ) running_loss += jax_utils.unreplicate(metrics["loss"] ) i += 1 if i % args.logging_steps == 0: lowerCamelCase_ : Tuple =jax_utils.unreplicate(state.step ) lowerCamelCase_ : Optional[Any] =running_loss.item() / i lowerCamelCase_ : Any =self.scheduler_fn(state_step - 1 ) lowerCamelCase_ : Optional[Any] =self.evaluate(snake_case__ , snake_case__ ) lowerCamelCase_ : str ={ "step": state_step.item(), "eval_loss": eval_loss.item(), "tr_loss": tr_loss, "lr": lr.item(), } tqdm.write(str(snake_case__ ) ) self.logger.log(snake_case__ , commit=snake_case__ ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""" , state=snake_case__ ) def UpperCAmelCase__ ( self : str , snake_case__ : Dict , snake_case__ : Union[str, Any] ): lowerCamelCase_ : List[Any] =get_batched_dataset(snake_case__ , self.args.batch_size ) lowerCamelCase_ : List[str] =len(snake_case__ ) // self.args.batch_size lowerCamelCase_ : Tuple =jnp.array(0 , dtype=jnp.floataa ) lowerCamelCase_ : Any =0 for batch in tqdm(snake_case__ , total=snake_case__ , desc="Evaluating ... " ): lowerCamelCase_ : Optional[Any] =self.data_collator(snake_case__ ) lowerCamelCase_ : List[str] =self.val_step_fn(snake_case__ , **snake_case__ ) running_loss += jax_utils.unreplicate(metrics["loss"] ) i += 1 return running_loss / i def UpperCAmelCase__ ( self : str , snake_case__ : Optional[int] , snake_case__ : Any ): lowerCamelCase_ : List[Any] =jax_utils.unreplicate(snake_case__ ) print(F"""SAVING CHECKPOINT IN {save_dir}""" , end=" ... 
" ) self.model_save_fn(snake_case__ , params=state.params ) with open(os.path.join(snake_case__ , "opt_state.msgpack" ) , "wb" ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args , os.path.join(snake_case__ , "args.joblib" ) ) joblib.dump(self.data_collator , os.path.join(snake_case__ , "data_collator.joblib" ) ) with open(os.path.join(snake_case__ , "training_state.json" ) , "w" ) as f: json.dump({"step": state.step.item()} , snake_case__ ) print("DONE" ) def _snake_case ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Any ) -> List[Any]: print(F"""RESTORING CHECKPOINT FROM {save_dir}""" , end=" ... " ) with open(os.path.join(lowerCamelCase__ , "flax_model.msgpack" ) , "rb" ) as f: lowerCamelCase_ : Any =from_bytes(state.params , f.read() ) with open(os.path.join(lowerCamelCase__ , "opt_state.msgpack" ) , "rb" ) as f: lowerCamelCase_ : Optional[Any] =from_bytes(state.opt_state , f.read() ) lowerCamelCase_ : List[Any] =joblib.load(os.path.join(lowerCamelCase__ , "args.joblib" ) ) lowerCamelCase_ : int =joblib.load(os.path.join(lowerCamelCase__ , "data_collator.joblib" ) ) with open(os.path.join(lowerCamelCase__ , "training_state.json" ) , "r" ) as f: lowerCamelCase_ : Optional[Any] =json.load(lowerCamelCase__ ) lowerCamelCase_ : Optional[Any] =training_state["step"] print("DONE" ) return params, opt_state, step, args, data_collator def _snake_case ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any ) -> str: lowerCamelCase_ : Dict =num_train_steps - warmup_steps lowerCamelCase_ : Optional[Any] =optax.linear_schedule(init_value=lowerCamelCase__ , end_value=lowerCamelCase__ , transition_steps=lowerCamelCase__ ) lowerCamelCase_ : List[Any] =optax.linear_schedule(init_value=lowerCamelCase__ , end_value=1e-7 , transition_steps=lowerCamelCase__ ) lowerCamelCase_ : Dict =optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def _snake_case ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] ) -> List[str]: def weight_decay_mask(lowerCamelCase__ : str ): lowerCamelCase_ : Union[str, Any] =traverse_util.flatten_dict(lowerCamelCase__ ) lowerCamelCase_ : Any ={k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()} return traverse_util.unflatten_dict(lowerCamelCase__ ) lowerCamelCase_ : Dict =scheduler_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCamelCase_ : List[str] =optax.adamw(learning_rate=lowerCamelCase__ , weight_decay=lowerCamelCase__ , mask=lowerCamelCase__ ) return tx, lr
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
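These parameter sets are meant to be plugged into pipeline test mixins as the required call arguments; a sketch of the typical pattern (the test class here is hypothetical):

# Hypothetical test class; only the two class attributes matter for the sketch.
class MyTextToImagePipelineFastTests:
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS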
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy approximation of minimum vertex cover: repeatedly pick the vertex with the most uncovered edges."""
    # For each node, add its rank and adjacency list to the queue.
    # heapq implements a min-priority queue, so ranks are stored as -1 * len(v)
    # to simulate a max-priority queue.
    queue: list[list] = []
    for key, value in graph.items():  # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # While the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # Extract the vertex with max rank from the queue and add it to chosen_vertices.
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax.
        for elem in queue:
            # If v has no adjacent nodes, skip it.
            if elem[0] == 0:
                continue
            # If argmax is reachable from elem, remove argmax from elem's
            # adjacency list and update elem's rank.
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # Re-order the queue.
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
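A small verification sketch for the routine above; note that the greedy pass mutates the adjacency lists it is given, hence the deepcopy:

from copy import deepcopy


def is_vertex_cover(graph: dict, cover: set[int]) -> bool:
    # Every edge must have at least one endpoint in the cover.
    return all(u in cover or v in cover for u, nbrs in graph.items() for v in nbrs)


graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
cover = greedy_min_vertex_cover(deepcopy(graph))
print(is_vertex_cover(graph, cover))  # True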
import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets _snake_case : Optional[int] = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n" _snake_case : Any = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n" _snake_case : List[str] = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n" def lowerCAmelCase_ ( __lowerCamelCase ): def remove_articles(__lowerCamelCase ): __snake_case : List[Any] = re.compile(R"\b(a|an|the)\b" , re.UNICODE ) return re.sub(__lowerCamelCase , " " , __lowerCamelCase ) def white_space_fix(__lowerCamelCase ): return " ".join(text.split() ) def remove_punc(__lowerCamelCase ): __snake_case : Dict = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__lowerCamelCase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__lowerCamelCase ) ) ) ) def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ): return int(normalize_answer(__lowerCamelCase ) == normalize_answer(__lowerCamelCase ) ) def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ): __snake_case : str = [any(compute_exact(__lowerCamelCase , __lowerCamelCase ) for ref in refs ) for pred, refs in zip(__lowerCamelCase , __lowerCamelCase )] return (sum(__lowerCamelCase ) / len(__lowerCamelCase )) * 1_0_0 def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): __snake_case : Union[str, Any] = [rgram for rgrams in rgramslist for rgram in rgrams] __snake_case : Tuple = Counter(__lowerCamelCase ) __snake_case : Optional[int] = Counter(__lowerCamelCase ) __snake_case : Tuple = Counter() for sgram, scount in sgramcounter.items(): 
__snake_case : str = scount * numref __snake_case : Optional[Any] = Counter(__lowerCamelCase ) __snake_case : Optional[int] = Counter() for cgram, ccount in cgramcounter.items(): __snake_case : str = ccount * numref # KEEP __snake_case : int = sgramcounter_rep & cgramcounter_rep __snake_case : int = keepgramcounter_rep & rgramcounter __snake_case : str = sgramcounter_rep & rgramcounter __snake_case : List[Any] = 0 __snake_case : Union[str, Any] = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. __snake_case : List[Any] = 1 __snake_case : List[str] = 1 if len(__lowerCamelCase ) > 0: __snake_case : int = keeptmpscorea / len(__lowerCamelCase ) if len(__lowerCamelCase ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) __snake_case : Any = keeptmpscorea / sum(keepgramcounterall_rep.values() ) __snake_case : str = 0 if keepscore_precision > 0 or keepscore_recall > 0: __snake_case : Optional[Any] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION __snake_case : List[Any] = sgramcounter_rep - cgramcounter_rep __snake_case : Optional[Any] = delgramcounter_rep - rgramcounter __snake_case : Optional[Any] = sgramcounter_rep - rgramcounter __snake_case : Dict = 0 __snake_case : Optional[int] = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. __snake_case : str = 1 if len(__lowerCamelCase ) > 0: __snake_case : List[str] = deltmpscorea / len(__lowerCamelCase ) # ADDITION __snake_case : Any = set(__lowerCamelCase ) - set(__lowerCamelCase ) __snake_case : int = set(__lowerCamelCase ) & set(__lowerCamelCase ) __snake_case : Dict = set(__lowerCamelCase ) - set(__lowerCamelCase ) __snake_case : int = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)


def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    # unigrams for the source and candidate sentences
    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    # one n-gram list per reference, kept separate per n-gram order
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore


def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space to split
    # the sentence. Even though the Wiki-Auto and TURK datasets do not
    # require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent


def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class a(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
from __future__ import annotations _snake_case : Any = "Muhammad Umer Farooq" _snake_case : Optional[int] = "MIT" _snake_case : Union[str, Any] = "1.0.0" _snake_case : Optional[Any] = "Muhammad Umer Farooq" _snake_case : List[Any] = "contact@muhammadumerfarooq.me" _snake_case : Dict = "Alpha" import re from html.parser import HTMLParser from urllib import parse import requests class a (_lowerCAmelCase ): """simple docstring""" def __init__( self : Tuple , lowerCamelCase : str ) -> None: super().__init__() __snake_case : list[str] = [] __snake_case : Any = domain def __snake_case ( self : List[str] , lowerCamelCase : str , lowerCamelCase : list[tuple[str, str | None]] ) -> None: # Only parse the 'anchor' tag. if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in urls. if value not in self.urls: __snake_case : Any = parse.urljoin(self.domain , lowerCamelCase ) self.urls.append(lowerCamelCase ) def lowerCAmelCase_ ( __lowerCamelCase ): return ".".join(get_sub_domain_name(__lowerCamelCase ).split("." )[-2:] ) def lowerCAmelCase_ ( __lowerCamelCase ): return parse.urlparse(__lowerCamelCase ).netloc def lowerCAmelCase_ ( __lowerCamelCase = "https://github.com" ): __snake_case : Tuple = get_domain_name(__lowerCamelCase ) # Initialize the parser __snake_case : Dict = Parser(__lowerCamelCase ) try: # Open URL __snake_case : Any = requests.get(__lowerCamelCase ) # pass the raw HTML to the parser to get links parser.feed(r.text ) # Get links and loop through __snake_case : List[str] = set() for link in parser.urls: # open URL. # read = requests.get(link) try: __snake_case : List[str] = requests.get(__lowerCamelCase ) # Get the valid email. __snake_case : Any = re.findall("[a-zA-Z0-9]+@" + domain , read.text ) # If not in list then append it. for email in emails: valid_emails.add(__lowerCamelCase ) except ValueError: pass except ValueError: raise SystemExit(1 ) # Finally return a sorted list of email addresses with no duplicates. return sorted(__lowerCamelCase ) if __name__ == "__main__": _snake_case : Union[str, Any] = emails_from_url("https://github.com") print(f'''{len(emails)} emails found:''') print("\n".join(sorted(emails)))
import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed lowercase : Optional[int] = { "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), "bert": (BertConfig, BertForMaskedLM, BertTokenizer), "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def UpperCAmelCase_ (_lowerCAmelCase : Any ): assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def UpperCAmelCase_ (_lowerCAmelCase : Optional[int] , _lowerCAmelCase : int ): if args.student_type == "roberta": __UpperCamelCase : Any = False elif args.student_type == "gpt2": __UpperCamelCase : Any = False def UpperCAmelCase_ (_lowerCAmelCase : Any , _lowerCAmelCase : Any ): if args.student_type == "roberta": __UpperCamelCase : Dict = False def UpperCAmelCase_ (): __UpperCamelCase : List[Any] = argparse.ArgumentParser(description="Training" ) parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." ) parser.add_argument( "--dump_path" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="The output directory (log, checkpoints, parameters, etc.)" ) parser.add_argument( "--data_file" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , ) parser.add_argument( "--student_type" , type=_lowerCAmelCase , choices=["distilbert", "roberta", "gpt2"] , required=_lowerCAmelCase , help="The student type (DistilBERT, RoBERTa)." , ) parser.add_argument("--student_config" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Path to the student configuration." ) parser.add_argument( "--student_pretrained_weights" , default=_lowerCAmelCase , type=_lowerCAmelCase , help="Load student initialization checkpoint." ) parser.add_argument( "--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=_lowerCAmelCase , help="Teacher type (BERT, RoBERTa)." ) parser.add_argument("--teacher_name" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="The teacher model." ) parser.add_argument("--temperature" , default=2.0 , type=_lowerCAmelCase , help="Temperature for the softmax temperature." 
) parser.add_argument( "--alpha_ce" , default=0.5 , type=_lowerCAmelCase , help="Linear weight for the distillation loss. Must be >=0." ) parser.add_argument( "--alpha_mlm" , default=0.0 , type=_lowerCAmelCase , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , ) parser.add_argument("--alpha_clm" , default=0.5 , type=_lowerCAmelCase , help="Linear weight for the CLM loss. Must be >=0." ) parser.add_argument("--alpha_mse" , default=0.0 , type=_lowerCAmelCase , help="Linear weight of the MSE loss. Must be >=0." ) parser.add_argument( "--alpha_cos" , default=0.0 , type=_lowerCAmelCase , help="Linear weight of the cosine embedding loss. Must be >=0." ) parser.add_argument( "--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." ) parser.add_argument( "--mlm_mask_prop" , default=0.15 , type=_lowerCAmelCase , help="Proportion of tokens for which we need to make a prediction." , ) parser.add_argument("--word_mask" , default=0.8 , type=_lowerCAmelCase , help="Proportion of tokens to mask out." ) parser.add_argument("--word_keep" , default=0.1 , type=_lowerCAmelCase , help="Proportion of tokens to keep." ) parser.add_argument("--word_rand" , default=0.1 , type=_lowerCAmelCase , help="Proportion of tokens to randomly replace." ) parser.add_argument( "--mlm_smoothing" , default=0.7 , type=_lowerCAmelCase , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , ) parser.add_argument("--token_counts" , type=_lowerCAmelCase , help="The token counts in the data_file for MLM." ) parser.add_argument( "--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , ) parser.add_argument( "--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , ) parser.add_argument( "--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , ) parser.add_argument("--n_epoch" , type=_lowerCAmelCase , default=3 , help="Number of pass on the whole dataset." ) parser.add_argument("--batch_size" , type=_lowerCAmelCase , default=5 , help="Batch size (for each process)." ) parser.add_argument( "--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , ) parser.add_argument( "--gradient_accumulation_steps" , type=_lowerCAmelCase , default=50 , help="Gradient accumulation for larger training batches." , ) parser.add_argument("--warmup_prop" , default=0.05 , type=_lowerCAmelCase , help="Linear warmup proportion." ) parser.add_argument("--weight_decay" , default=0.0 , type=_lowerCAmelCase , help="Weight decay if we apply some." ) parser.add_argument("--learning_rate" , default=5E-4 , type=_lowerCAmelCase , help="The initial learning rate for Adam." ) parser.add_argument("--adam_epsilon" , default=1E-6 , type=_lowerCAmelCase , help="Epsilon for Adam optimizer." ) parser.add_argument("--max_grad_norm" , default=5.0 , type=_lowerCAmelCase , help="Max gradient norm." ) parser.add_argument("--initializer_range" , default=0.02 , type=_lowerCAmelCase , help="Random initialization range." 
) parser.add_argument( "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , ) parser.add_argument( "--fp16_opt_level" , type=_lowerCAmelCase , default="O1" , help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html" ) , ) parser.add_argument("--n_gpu" , type=_lowerCAmelCase , default=1 , help="Number of GPUs in the node." ) parser.add_argument("--local_rank" , type=_lowerCAmelCase , default=-1 , help="Distributed training - Local rank" ) parser.add_argument("--seed" , type=_lowerCAmelCase , default=56 , help="Random seed" ) parser.add_argument("--log_interval" , type=_lowerCAmelCase , default=5_00 , help="Tensorboard logging interval." ) parser.add_argument("--checkpoint_interval" , type=_lowerCAmelCase , default=40_00 , help="Checkpoint interval." ) __UpperCamelCase : List[Any] = parser.parse_args() sanity_checks(_lowerCAmelCase ) # ARGS # init_gpu_params(_lowerCAmelCase ) set_seed(_lowerCAmelCase ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' " itUse `--force` if you want to overwrite it" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(F'''Param: {args}''' ) with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f: json.dump(vars(_lowerCAmelCase ) , _lowerCAmelCase , indent=4 ) git_log(args.dump_path ) __UpperCamelCase : Optional[int] = MODEL_CLASSES[args.student_type] __UpperCamelCase : str = MODEL_CLASSES[args.teacher_type] # TOKENIZER # __UpperCamelCase : List[Any] = teacher_tokenizer_class.from_pretrained(args.teacher_name ) __UpperCamelCase : Optional[Any] = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): __UpperCamelCase : Optional[int] = tokenizer.all_special_tokens.index(_lowerCAmelCase ) __UpperCamelCase : Dict = tokenizer.all_special_ids[idx] logger.info(F'''Special tokens {special_tok_ids}''' ) __UpperCamelCase : List[Any] = special_tok_ids __UpperCamelCase : int = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F'''Loading data from {args.data_file}''' ) with open(args.data_file , "rb" ) as fp: __UpperCamelCase : List[str] = pickle.load(_lowerCAmelCase ) if args.mlm: logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts , "rb" ) as fp: __UpperCamelCase : List[str] = pickle.load(_lowerCAmelCase ) __UpperCamelCase : Optional[int] = np.maximum(_lowerCAmelCase , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): __UpperCamelCase : Union[str, Any] = 0.0 # do not predict special tokens __UpperCamelCase : List[str] = torch.from_numpy(_lowerCAmelCase ) else: __UpperCamelCase : List[str] = None __UpperCamelCase : int = LmSeqsDataset(params=_lowerCAmelCase , data=_lowerCAmelCase ) logger.info("Data loader created." 
) # STUDENT # logger.info(F'''Loading student config from {args.student_config}''' ) __UpperCamelCase : Any = student_config_class.from_pretrained(args.student_config ) __UpperCamelCase : Optional[Any] = True if args.student_pretrained_weights is not None: logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' ) __UpperCamelCase : Union[str, Any] = student_model_class.from_pretrained(args.student_pretrained_weights , config=_lowerCAmelCase ) else: __UpperCamelCase : str = student_model_class(_lowerCAmelCase ) if args.n_gpu > 0: student.to(F'''cuda:{args.local_rank}''' ) logger.info("Student loaded." ) # TEACHER # __UpperCamelCase : str = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_lowerCAmelCase ) if args.n_gpu > 0: teacher.to(F'''cuda:{args.local_rank}''' ) logger.info(F'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_lowerCAmelCase , _lowerCAmelCase ) if args.freeze_token_type_embds: freeze_token_type_embeddings(_lowerCAmelCase , _lowerCAmelCase ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() __UpperCamelCase : Tuple = Distiller( params=_lowerCAmelCase , dataset=_lowerCAmelCase , token_probs=_lowerCAmelCase , student=_lowerCAmelCase , teacher=_lowerCAmelCase ) distiller.train() logger.info("Let's go get some drinks." ) if __name__ == "__main__": main()
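# --- Usage sketch (illustrative addition) ---
# A hypothetical invocation of this distillation script; the script file name,
# paths and checkpoint names below are placeholders, and every flag should be
# checked against the argparse definitions above. Note that with `--mlm` the
# sanity checks require `alpha_mlm > 0` and `alpha_clm == 0`.
#
# python train.py \
#     --force \
#     --dump_path serialization_dir/my_first_distillation \
#     --data_file data/binarized_text.bert-base-uncased.pickle \
#     --token_counts data/token_counts.bert-base-uncased.pickle \
#     --student_type distilbert \
#     --student_config training_configs/distilbert-base-uncased.json \
#     --teacher_type bert \
#     --teacher_name bert-base-uncased \
#     --mlm --alpha_ce 0.33 --alpha_mlm 0.33 --alpha_cos 0.33 --alpha_clm 0.0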
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is kept in the `asdict` output even at its default value so the
    # template serializes unambiguously.
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
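# --- Usage sketch (illustrative addition) ---
# Aligning the template above with a dataset's concrete features; shown as
# comments because this module uses relative imports and is not meant to run
# standalone. The toy label names are placeholders.
#
# features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
# template = TextClassification().align_with_features(features)
# template.column_mapping  # -> {"text": "text", "labels": "labels"}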
"""simple docstring""" import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} UpperCAmelCase : Union[str, Any] = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } UpperCAmelCase : List[str] = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } UpperCAmelCase : Tuple = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } UpperCAmelCase : List[Any] = { 'facebook/dpr-ctx_encoder-single-nq-base': 512, 'facebook/dpr-ctx_encoder-multiset-base': 512, } UpperCAmelCase : Optional[Any] = { 'facebook/dpr-question_encoder-single-nq-base': 512, 'facebook/dpr-question_encoder-multiset-base': 512, } UpperCAmelCase : str = { 'facebook/dpr-reader-single-nq-base': 512, 'facebook/dpr-reader-multiset-base': 512, } UpperCAmelCase : Tuple = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } UpperCAmelCase : List[Any] = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } UpperCAmelCase : Dict = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class lowerCamelCase__ ( A ): """simple docstring""" __a = VOCAB_FILES_NAMES __a = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP __a = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class lowerCamelCase__ ( A ): """simple docstring""" __a = VOCAB_FILES_NAMES __a = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP __a = 
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase : List[Any] = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text'] ) UpperCAmelCase : Dict = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) UpperCAmelCase : int = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. 
This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(A ) class lowerCamelCase__ : """simple docstring""" def __call__( self : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Union[bool, str] = False , UpperCamelCase : Union[bool, str] = False , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[bool] = None , **UpperCamelCase : int , ): '''simple docstring''' if titles is None and texts is None: return super().__call__( UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=UpperCamelCase , return_tensors=UpperCamelCase , return_attention_mask=UpperCamelCase , **UpperCamelCase , ) elif titles is None or texts is None: __UpperCAmelCase : str = titles if texts is None else texts return super().__call__( UpperCamelCase , UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=UpperCamelCase , return_tensors=UpperCamelCase , return_attention_mask=UpperCamelCase , **UpperCamelCase , ) __UpperCAmelCase : Any = titles if not isinstance(UpperCamelCase , UpperCamelCase ) else [titles] __UpperCAmelCase : Optional[int] = texts if not isinstance(UpperCamelCase , UpperCamelCase ) else [texts] __UpperCAmelCase : int = len(UpperCamelCase ) __UpperCAmelCase : str = questions if not isinstance(UpperCamelCase , UpperCamelCase ) else [questions] * n_passages if len(UpperCamelCase ) != len(UpperCamelCase ): raise ValueError( f'''There should be as many titles than texts but got {len(UpperCamelCase )} titles and {len(UpperCamelCase )} texts.''' ) __UpperCAmelCase : Optional[Any] = super().__call__(UpperCamelCase , UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase )["""input_ids"""] __UpperCAmelCase : Optional[Any] = super().__call__(UpperCamelCase , add_special_tokens=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase )["""input_ids"""] __UpperCAmelCase : Any = { """input_ids""": [ 
(encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(UpperCamelCase , UpperCamelCase ) ] } if return_attention_mask is not False: __UpperCAmelCase : Tuple = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __UpperCAmelCase : Dict = attention_mask return self.pad(UpperCamelCase , padding=UpperCamelCase , max_length=UpperCamelCase , return_tensors=UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : BatchEncoding , UpperCamelCase : DPRReaderOutput , UpperCamelCase : int = 16 , UpperCamelCase : int = 64 , UpperCamelCase : int = 4 , ): '''simple docstring''' __UpperCAmelCase : List[str] = reader_input["""input_ids"""] __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = reader_output[:3] __UpperCAmelCase : Optional[int] = len(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = sorted(range(UpperCamelCase ) , reverse=UpperCamelCase , key=relevance_logits.__getitem__ ) __UpperCAmelCase : List[DPRReaderOutput] = [] for doc_id in sorted_docs: __UpperCAmelCase : str = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __UpperCAmelCase : Tuple = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __UpperCAmelCase : int = sequence_ids.index(self.pad_token_id ) else: __UpperCAmelCase : Union[str, Any] = len(UpperCamelCase ) __UpperCAmelCase : Any = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCamelCase , top_spans=UpperCamelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCamelCase , start_index=UpperCamelCase , end_index=UpperCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(UpperCamelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def lowerCamelCase__ ( self : Tuple , UpperCamelCase : List[int] , UpperCamelCase : List[int] , UpperCamelCase : int , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : List[Any] = [] for start_index, start_score in enumerate(UpperCamelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __UpperCAmelCase : str = sorted(UpperCamelCase , key=lambda UpperCamelCase : x[1] , reverse=UpperCamelCase ) __UpperCAmelCase : Tuple = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' ) __UpperCAmelCase : Dict = end_index - start_index + 1 if length > max_answer_length: raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(UpperCamelCase ) == top_spans: 
break return chosen_span_intervals @add_end_docstrings(A ) class lowerCamelCase__ ( A , A ): """simple docstring""" __a = VOCAB_FILES_NAMES __a = READER_PRETRAINED_VOCAB_FILES_MAP __a = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a = READER_PRETRAINED_INIT_CONFIGURATION __a = ["""input_ids""", """attention_mask"""]
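# --- Usage sketch (illustrative addition) ---
# The classes above mirror transformers' DPR reader tokenizer (the concrete
# class names are mangled in this file); assuming the final class corresponds
# to `DPRReaderTokenizer`, encoding a question against a passage looks like:
#
# tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
# encoded = tokenizer(
#     questions=["What does DPR stand for?"],
#     titles=["Dense Passage Retrieval"],
#     texts=["Dense Passage Retrieval (DPR) is a method for open-domain question answering."],
#     return_tensors="pt",
# )
# `decode_best_spans` then turns the reader's logits into ranked answer spans.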
"""simple docstring""" from importlib import import_module from .logging import get_logger UpperCAmelCase : Any = get_logger(__name__) class lowerCamelCase__ : """simple docstring""" def __init__( self : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : int=None ): '''simple docstring''' __UpperCAmelCase : Tuple = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith("""__""" ): setattr(self , UpperCamelCase , getattr(UpperCamelCase , UpperCamelCase ) ) __UpperCAmelCase : Any = module._original_module if isinstance(UpperCamelCase , _PatchedModuleObj ) else module class lowerCamelCase__ : """simple docstring""" __a = [] def __init__( self : str , UpperCamelCase : Tuple , UpperCamelCase : str , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any]=None ): '''simple docstring''' __UpperCAmelCase : int = obj __UpperCAmelCase : Union[str, Any] = target __UpperCAmelCase : List[str] = new __UpperCAmelCase : Optional[int] = target.split(""".""" )[0] __UpperCAmelCase : Tuple = {} __UpperCAmelCase : Union[str, Any] = attrs or [] def __enter__( self : Dict ): '''simple docstring''' *__UpperCAmelCase ,__UpperCAmelCase : str = self.target.split(""".""" ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(UpperCamelCase ) ): try: __UpperCAmelCase : List[Any] = import_module(""".""".join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): __UpperCAmelCase : List[Any] = getattr(self.obj , UpperCamelCase ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(UpperCamelCase , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): __UpperCAmelCase : Tuple = obj_attr # patch at top level setattr(self.obj , UpperCamelCase , _PatchedModuleObj(UpperCamelCase , attrs=self.attrs ) ) __UpperCAmelCase : int = getattr(self.obj , UpperCamelCase ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(UpperCamelCase , UpperCamelCase , _PatchedModuleObj(getattr(UpperCamelCase , UpperCamelCase , UpperCamelCase ) , attrs=self.attrs ) ) __UpperCAmelCase : Optional[int] = getattr(UpperCamelCase , UpperCamelCase ) # finally set the target attribute setattr(UpperCamelCase , UpperCamelCase , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: __UpperCAmelCase : int = getattr(import_module(""".""".join(UpperCamelCase ) ) , UpperCamelCase ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". 
if getattr(self.obj , UpperCamelCase ) is attr_value: __UpperCAmelCase : Union[str, Any] = getattr(self.obj , UpperCamelCase ) setattr(self.obj , UpperCamelCase , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" __UpperCAmelCase : str = globals()["""__builtins__"""][target_attr] setattr(self.obj , UpperCamelCase , self.new ) else: raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' ) def __exit__( self : str , *UpperCamelCase : Optional[int] ): '''simple docstring''' for attr in list(self.original ): setattr(self.obj , UpperCamelCase , self.original.pop(UpperCamelCase ) ) def lowerCamelCase__ ( self : int ): '''simple docstring''' self.__enter__() self._active_patches.append(self ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
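# --- Usage sketch (illustrative addition) ---
# This patcher corresponds to `datasets.utils.patching.patch_submodule` (the
# class name is mangled above); assuming that name, a typical use is:
#
# with patch_submodule(some_module, "os.path.join", mock_join):
#     ...  # inside the block, some_module's view of os.path.join is the mock
#
# or `patch_submodule(...).start()` / `.stop()` outside a `with` block, which
# is what the last two methods of the class implement.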
'''simple docstring''' import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.local_sgd import LocalSGD ######################################################################## # This is a fully working simple example to use Accelerate # with LocalSGD, which is a method to synchronize model # parameters every K batches. It is different, but complementary # to gradient accumulation. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCamelCase__ : List[str] = 16 UpperCamelCase__ : Tuple = 32 def UpperCAmelCase ( a_ , a_ = 1_6 ) -> str: """simple docstring""" A_ : str = AutoTokenizer.from_pretrained("""bert-base-cased""" ) A_ : Union[str, Any] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(a_ ): # max_length=None => use the model max length (it's actually the default) A_ : List[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=a_ , max_length=a_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): A_ : Tuple = datasets.map( a_ , batched=a_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A_ : int = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(a_ ): # On TPU it's best to pad everything to the same length or training will be very slow. A_ : List[str] = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": A_ : Optional[int] = 1_6 elif accelerator.mixed_precision != "no": A_ : Tuple = 8 else: A_ : Optional[Any] = None return tokenizer.pad( a_ , padding="""longest""" , max_length=a_ , pad_to_multiple_of=a_ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
A_ : List[Any] = DataLoader( tokenized_datasets["""train"""] , shuffle=a_ , collate_fn=a_ , batch_size=a_ ) A_ : List[str] = DataLoader( tokenized_datasets["""validation"""] , shuffle=a_ , collate_fn=a_ , batch_size=a_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCamelCase__ : Any = mocked_dataloaders # noqa: F811 def UpperCAmelCase ( a_ , a_ ) -> Optional[Any]: """simple docstring""" if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , a_ ) == "1": A_ : Optional[int] = 2 # New Code # A_ : Any = int(args.gradient_accumulation_steps ) A_ : List[str] = int(args.local_sgd_steps ) # Initialize accelerator A_ : int = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=a_ ) if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]: raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A_ : Any = config["""lr"""] A_ : Tuple = int(config["""num_epochs"""] ) A_ : Union[str, Any] = int(config["""seed"""] ) A_ : Dict = int(config["""batch_size"""] ) A_ : Optional[int] = evaluate.load("""glue""" , """mrpc""" ) set_seed(a_ ) A_ : Optional[Any] = get_dataloaders(a_ , a_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A_ : str = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=a_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). A_ : Dict = model.to(accelerator.device ) # Instantiate optimizer A_ : int = AdamW(params=model.parameters() , lr=a_ ) # Instantiate scheduler A_ : List[str] = get_linear_schedule_with_warmup( optimizer=a_ , num_warmup_steps=1_0_0 , num_training_steps=(len(a_ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A_ : Union[str, Any] = accelerator.prepare( a_ , a_ , a_ , a_ , a_ ) # Now we train the model for epoch in range(a_ ): model.train() with LocalSGD( accelerator=a_ , model=a_ , local_sgd_steps=a_ , enabled=local_sgd_steps is not None ) as local_sgd: for step, batch in enumerate(a_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(a_ ): A_ : Any = model(**a_ ) A_ : int = output.loss accelerator.backward(a_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # LocalSGD-specific line local_sgd.step() model.eval() for step, batch in enumerate(a_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): A_ : int = model(**a_ ) A_ : List[str] = outputs.logits.argmax(dim=-1 ) A_ : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=a_ , references=a_ , ) A_ : Dict = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"epoch {epoch}:" , a_ ) def UpperCAmelCase ( ) -> Any: """simple docstring""" A_ : Any = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=a_ , default=a_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) # New Code # parser.add_argument( """--gradient_accumulation_steps""" , type=a_ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , ) parser.add_argument( """--local_sgd_steps""" , type=a_ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) A_ : List[str] = parser.parse_args() A_ : List[Any] = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6} training_function(a_ , a_ ) if __name__ == "__main__": main()
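# --- Usage sketch (illustrative addition) ---
# A hypothetical launch of this LocalSGD example (the script file name is a
# placeholder; distributed settings normally come from `accelerate config`):
#
# accelerate launch local_sgd.py --mixed_precision fp16 \
#     --gradient_accumulation_steps 2 --local_sgd_steps 8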
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class _SCREAMING_SNAKE_CASE : def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=6 , lowercase=17 , lowercase=23 , lowercase=11 , lowercase=True , ) -> int: lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = act_dim lowerCamelCase_ = state_dim lowerCamelCase_ = hidden_size lowerCamelCase_ = max_length lowerCamelCase_ = is_training def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]: lowerCamelCase_ = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) lowerCamelCase_ = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) lowerCamelCase_ = floats_tensor((self.batch_size, self.seq_length, 1) ) lowerCamelCase_ = floats_tensor((self.batch_size, self.seq_length, 1) ) lowerCamelCase_ = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 ) lowerCamelCase_ = random_attention_mask((self.batch_size, self.seq_length) ) lowerCamelCase_ = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def SCREAMING_SNAKE_CASE_( self ) -> Any: return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[str]: lowerCamelCase_ = DecisionTransformerModel(config=lowercase ) model.to(lowercase ) model.eval() lowerCamelCase_ = model(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]: lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = { "states": states, "actions": actions, "rewards": rewards, "returns_to_go": returns_to_go, "timesteps": timesteps, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ): lowerCAmelCase__ = (DecisionTransformerModel,) if is_torch_available() else () lowerCAmelCase__ = () lowerCAmelCase__ = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model 
does not use inputs_ids lowerCAmelCase__ = False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]: lowerCamelCase_ = DecisionTransformerModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=lowercase , hidden_size=37 ) def SCREAMING_SNAKE_CASE_( self ) -> Any: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase ) @slow def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]: for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = DecisionTransformerModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> Any: lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = model_class(lowercase ) lowerCamelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ = [*signature.parameters.keys()] lowerCamelCase_ = [ "states", "actions", "rewards", "returns_to_go", "timesteps", "attention_mask", ] self.assertListEqual(arg_names[: len(lowercase )] , lowercase ) @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE_( self ) -> List[Any]: lowerCamelCase_ = 2 # number of steps of autoregressive prediction we will perform lowerCamelCase_ = 10 # defined by the RL environment, may be normalized lowerCamelCase_ = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" ) lowerCamelCase_ = model.to(lowercase ) lowerCamelCase_ = model.config torch.manual_seed(0 ) lowerCamelCase_ = torch.randn(1 , 1 , config.state_dim ).to(device=lowercase , dtype=torch.floataa ) # env.reset() lowerCamelCase_ = torch.tensor( [[0.2_4_2_7_9_3, -0.2_8_6_9_3_0_7_4, 0.8_7_4_2_6_1_3], [0.6_7_8_1_5_2_7_4, -0.0_8_1_0_1_0_8_5, -0.1_2_9_5_2_1_4_7]] , device=lowercase ) lowerCamelCase_ = torch.tensor(lowercase , device=lowercase , dtype=torch.floataa ).reshape(1 , 1 , 1 ) lowerCamelCase_ = state lowerCamelCase_ = torch.zeros(1 , 0 , config.act_dim , device=lowercase , dtype=torch.floataa ) lowerCamelCase_ = torch.zeros(1 , 0 , device=lowercase , dtype=torch.floataa ) lowerCamelCase_ = torch.tensor(0 , device=lowercase , dtype=torch.long ).reshape(1 , 1 ) for step in range(lowercase ): lowerCamelCase_ = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=lowercase )] , dim=1 ) lowerCamelCase_ = torch.cat([rewards, torch.zeros(1 , 1 , device=lowercase )] , dim=1 ) lowerCamelCase_ = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device ) with torch.no_grad(): lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = model( states=lowercase , actions=lowercase , rewards=lowercase , returns_to_go=lowercase , timesteps=lowercase , attention_mask=lowercase , return_dict=lowercase , ) self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = ( # 
env.step(action) torch.randn(1 , 1 , config.state_dim ).to(device=lowercase , dtype=torch.floataa ), 1.0, False, {}, ) lowerCamelCase_ = action_pred[0, -1] lowerCamelCase_ = torch.cat([states, state] , dim=1 ) lowerCamelCase_ = returns_to_go[0, -1] - reward lowerCamelCase_ = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) lowerCamelCase_ = torch.cat( [timesteps, torch.ones((1, 1) , device=lowercase , dtype=torch.long ) * (step + 1)] , dim=1 )
import math


def res(x, y):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Read two base/power pairs from input and typecast them to int using map.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each power using res(), which takes two arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
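# --- Worked example (illustrative addition) ---
# Comparing 2^10 and 3^7 without evaluating the powers:
#   res(2, 10) = 10 * log10(2) ~= 3.0103
#   res(3, 7)  =  7 * log10(3) ~= 3.3398
# Since 3.3398 > 3.0103, the program reports 3 ^ 7 (= 2187) as larger
# than 2 ^ 10 (= 1024).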
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
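# --- Usage note (illustrative addition) ---
# With `sys.modules[__name__] = _LazyModule(...)` above, importing the package
# is cheap: a submodule is only materialized when one of its attributes is
# first accessed, e.g.
#
# from transformers.models.xglm import XGLMConfig  # triggers the real import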
import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() _snake_case : Union[str, Any] = logging.get_logger(__name__) def lowerCAmelCase_ ( __lowerCamelCase ): __snake_case : Dict = SwinConfig.from_pretrained( "microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] ) __snake_case : List[Any] = MaskFormerConfig(backbone_config=__lowerCamelCase ) __snake_case : List[Any] = "huggingface/label-files" if "ade20k-full" in model_name: # this should be ok __snake_case : Any = 8_4_7 __snake_case : List[Any] = "maskformer-ade20k-full-id2label.json" elif "ade" in model_name: # this should be ok __snake_case : Optional[int] = 1_5_0 __snake_case : int = "ade20k-id2label.json" elif "coco-stuff" in model_name: # this should be ok __snake_case : Optional[Any] = 1_7_1 __snake_case : List[str] = "maskformer-coco-stuff-id2label.json" elif "coco" in model_name: # TODO __snake_case : Optional[int] = 1_3_3 __snake_case : int = "coco-panoptic-id2label.json" elif "cityscapes" in model_name: # this should be ok __snake_case : Union[str, Any] = 1_9 __snake_case : Dict = "cityscapes-id2label.json" elif "vistas" in model_name: # this should be ok __snake_case : Any = 6_5 __snake_case : Any = "mapillary-vistas-id2label.json" __snake_case : str = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="dataset" ) , "r" ) ) __snake_case : Tuple = {int(__lowerCamelCase ): v for k, v in idalabel.items()} return config def lowerCAmelCase_ ( __lowerCamelCase ): __snake_case : Dict = [] # stem # fmt: off rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") ) rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") ) rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', 
F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') ) rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') ) # FPN rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") ) rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") ) rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') ) rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') ) rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') ) rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') ) rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') ) rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') ) rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") ) rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', 
F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') ) # cross-attention out projection rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') ) # MLP 1 rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') ) # MLP 2 rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') ) # layernorm 1 (self-attention layernorm) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') ) # layernorm 3 (final layernorm) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') ) rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") ) rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") ) # heads on top rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") ) rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") ) rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") ) rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") ) rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") ) for i in range(3 ): rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') ) rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') ) # fmt: on return rename_keys def lowerCAmelCase_ ( 
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): __snake_case : Dict = dct.pop(__lowerCamelCase ) __snake_case : Any = val def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ): __snake_case : List[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): __snake_case : Optional[int] = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) __snake_case : Tuple = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' ) __snake_case : Tuple = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict __snake_case : Tuple = in_proj_weight[:dim, :] __snake_case : Tuple = in_proj_bias[: dim] __snake_case : Union[str, Any] = in_proj_weight[ dim : dim * 2, : ] __snake_case : Tuple = in_proj_bias[ dim : dim * 2 ] __snake_case : str = in_proj_weight[ -dim :, : ] __snake_case : Any = in_proj_bias[-dim :] # fmt: on def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ): # fmt: off __snake_case : Optional[int] = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) __snake_case : List[str] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' ) __snake_case : Union[str, Any] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict __snake_case : Any = in_proj_weight[: hidden_size, :] __snake_case : Optional[int] = in_proj_bias[:config.hidden_size] __snake_case : Any = in_proj_weight[hidden_size : hidden_size * 2, :] __snake_case : Any = in_proj_bias[hidden_size : hidden_size * 2] __snake_case : Tuple = in_proj_weight[-hidden_size :, :] __snake_case : Optional[Any] = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) __snake_case : Optional[Any] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' ) __snake_case : Union[str, Any] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict __snake_case : int = in_proj_weight[: hidden_size, :] __snake_case : Tuple = in_proj_bias[:config.hidden_size] __snake_case : str = in_proj_weight[hidden_size : hidden_size * 2, :] __snake_case : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2] __snake_case : Optional[Any] = in_proj_weight[-hidden_size :, :] __snake_case : Tuple = in_proj_bias[-hidden_size :] # fmt: on def lowerCAmelCase_ ( ): __snake_case : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg" __snake_case : List[str] = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = False ): __snake_case : Optional[int] = get_maskformer_config(__lowerCamelCase ) # load original state_dict with open(__lowerCamelCase , "rb" ) as f: __snake_case : int = pickle.load(__lowerCamelCase ) 
__snake_case : Optional[int] = data["model"] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys __snake_case : Tuple = create_rename_keys(__lowerCamelCase ) for src, dest in rename_keys: rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) read_in_swin_q_k_v(__lowerCamelCase , config.backbone_config ) read_in_decoder_q_k_v(__lowerCamelCase , __lowerCamelCase ) # update to torch tensors for key, value in state_dict.items(): __snake_case : int = torch.from_numpy(__lowerCamelCase ) # load 🤗 model __snake_case : List[str] = MaskFormerForInstanceSegmentation(__lowerCamelCase ) model.eval() for name, param in model.named_parameters(): print(__lowerCamelCase , param.shape ) __snake_case , __snake_case : List[str] = model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(__lowerCamelCase ) == 0, F'Unexpected keys: {unexpected_keys}' # verify results __snake_case : Union[str, Any] = prepare_img() if "vistas" in model_name: __snake_case : Optional[int] = 6_5 elif "cityscapes" in model_name: __snake_case : Optional[int] = 6_5_5_3_5 else: __snake_case : Union[str, Any] = 2_5_5 __snake_case : Union[str, Any] = True if "ade" in model_name else False __snake_case : str = MaskFormerImageProcessor(ignore_index=__lowerCamelCase , reduce_labels=__lowerCamelCase ) __snake_case : List[str] = image_processor(__lowerCamelCase , return_tensors="pt" ) __snake_case : Tuple = model(**__lowerCamelCase ) print("Logits:" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": __snake_case : Optional[Any] = torch.tensor( [[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(F'Saving model and image processor to {pytorch_dump_folder_path}' ) Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) image_processor.save_pretrained(__lowerCamelCase ) if push_to_hub: print("Pushing model and image processor to the hub..." ) model.push_to_hub(F'nielsr/{model_name}' ) image_processor.push_to_hub(F'nielsr/{model_name}' ) if __name__ == "__main__": _snake_case : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="maskformer-swin-tiny-ade", type=str, help=("Name of the MaskFormer model you'd like to convert",), ) parser.add_argument( "--checkpoint_path", default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl", type=str, help="Path to the original state dict (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) _snake_case : List[str] = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
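# --- Invocation sketch ---
# The argparse flags above define the CLI. Assumptions: the script is saved as
# convert_maskformer_original_pytorch_checkpoint_to_pytorch.py, and the checkpoint
# path points at a locally downloaded original MaskFormer pickle.
#
#   python convert_maskformer_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade \
#       --push_to_hub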
import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : Tuple = {"""vocab_file""": """sentencepiece.model"""} lowerCAmelCase : int = { """vocab_file""": { """google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""", }, } lowerCAmelCase : Any = { """google/rembert""": 256, } class __lowercase ( UpperCAmelCase_ ): """simple docstring""" _UpperCAmelCase : Optional[Any] = VOCAB_FILES_NAMES _UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int]=False , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : List[str]="[CLS]" , lowerCAmelCase__ : Tuple="[SEP]" , lowerCAmelCase__ : List[str]="[UNK]" , lowerCAmelCase__ : Union[str, Any]="[SEP]" , lowerCAmelCase__ : List[Any]="[PAD]" , lowerCAmelCase__ : int="[CLS]" , lowerCAmelCase__ : Optional[Any]="[MASK]" , **lowerCAmelCase__ : Tuple , ): super().__init__( do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE_: Optional[int] = do_lower_case SCREAMING_SNAKE_CASE_: Any = remove_space SCREAMING_SNAKE_CASE_: Tuple = keep_accents SCREAMING_SNAKE_CASE_: int = vocab_file SCREAMING_SNAKE_CASE_: List[Any] = spm.SentencePieceProcessor() self.sp_model.Load(lowerCAmelCase__) @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): return len(self.sp_model) def _SCREAMING_SNAKE_CASE ( self : Dict): SCREAMING_SNAKE_CASE_: Union[str, Any] = {self.convert_ids_to_tokens(lowerCAmelCase__): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : int): SCREAMING_SNAKE_CASE_: int = self.__dict__.copy() SCREAMING_SNAKE_CASE_: Any = None return state def __setstate__( self : str , lowerCAmelCase__ : Any): SCREAMING_SNAKE_CASE_: Union[str, Any] = d SCREAMING_SNAKE_CASE_: Dict = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str]=False): SCREAMING_SNAKE_CASE_: Union[str, Any] = self.sp_model.EncodeAsPieces(lowerCAmelCase__) return pieces def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Any): return self.sp_model.PieceToId(lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Tuple): return self.sp_model.IdToPiece(lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : Optional[int]): SCREAMING_SNAKE_CASE_: List[str] = self.sp_model.decode_pieces(lowerCAmelCase__) return out_string def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None): SCREAMING_SNAKE_CASE_: Dict = [self.sep_token_id] SCREAMING_SNAKE_CASE_: str = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : 
Optional[List[int]] = None , lowerCAmelCase__ : bool = False): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model.") return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(lowerCAmelCase__)) + [1] + ([0] * len(lowerCAmelCase__)) + [1] return [1] + ([0] * len(lowerCAmelCase__)) + [1] def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None): SCREAMING_SNAKE_CASE_: Any = [self.sep_token_id] SCREAMING_SNAKE_CASE_: str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None): if not os.path.isdir(lowerCAmelCase__): logger.error("Vocabulary path ({}) should be a directory".format(lowerCAmelCase__)) return SCREAMING_SNAKE_CASE_: Any = os.path.join( lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__): copyfile(self.vocab_file , lowerCAmelCase__) return (out_vocab_file,)
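# --- Usage sketch ---
# Assumption: upstream, the sentencepiece-backed tokenizer class above is exposed
# as transformers.RemBertTokenizer (its pretrained map points at google/rembert).
from transformers import RemBertTokenizer

tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
pair = tokenizer("first segment", "second segment")
# build_inputs_with_special_tokens produced: [CLS] A [SEP] B [SEP]
print(pair["input_ids"][:3], pair["token_type_ids"][:3])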
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

# First cells injected into every auto-generated notebook.
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]

# Placeholders swapped for parseable fake names before formatting doc examples.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
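# --- Usage sketch ---
# One way the placeholder map above could be applied before running a formatter
# over doc examples (this substitution loop is an assumption, not the
# doc-builder's actual implementation).
code = "processor = {processor_class}.from_pretrained(checkpoint)"
for placeholder, fake in black_avoid_patterns.items():
    code = code.replace(placeholder, fake)
print(code)  # processor = FakeProcessorClass.from_pretrained(checkpoint)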
'''simple docstring''' from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase_ : """simple docstring""" def __init__( self : Union[str, Any] , _lowerCamelCase : Tuple , _lowerCamelCase : str=13 , _lowerCamelCase : Tuple=30 , _lowerCamelCase : List[Any]=2 , _lowerCamelCase : Any=3 , _lowerCamelCase : int=True , _lowerCamelCase : List[Any]=True , _lowerCamelCase : int=32 , _lowerCamelCase : int=2 , _lowerCamelCase : int=4 , _lowerCamelCase : List[str]=37 , _lowerCamelCase : Optional[Any]="gelu" , _lowerCamelCase : List[str]=0.1 , _lowerCamelCase : Tuple=0.1 , _lowerCamelCase : Tuple=10 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Optional[Any]=0.6 , _lowerCamelCase : Union[str, Any]=None , ): """simple docstring""" A_ : Optional[int] = parent A_ : Any = batch_size A_ : Any = image_size A_ : Union[str, Any] = patch_size A_ : Tuple = num_channels A_ : Tuple = is_training A_ : Union[str, Any] = use_labels A_ : Optional[int] = hidden_size A_ : Tuple = num_hidden_layers A_ : str = num_attention_heads A_ : Dict = intermediate_size A_ : List[Any] = hidden_act A_ : Union[str, Any] = hidden_dropout_prob A_ : Any = attention_probs_dropout_prob A_ : Tuple = type_sequence_label_size A_ : Optional[int] = initializer_range A_ : List[Any] = mask_ratio A_ : Dict = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) A_ : List[str] = (image_size // patch_size) ** 2 A_ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def _a ( self : str ): """simple docstring""" A_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : List[Any] = None if self.use_labels: A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Union[str, Any] = self.get_config() return config, pixel_values, labels def _a ( self : Optional[int] ): """simple docstring""" return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def _a ( self : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple ): 
"""simple docstring""" A_ : Union[str, Any] = TFViTMAEModel(config=_lowerCamelCase ) A_ : List[Any] = model(_lowerCamelCase , training=_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] ): """simple docstring""" A_ : Any = TFViTMAEForPreTraining(_lowerCamelCase ) A_ : List[str] = model(_lowerCamelCase , training=_lowerCamelCase ) # expected sequence length = num_patches A_ : int = (self.image_size // self.patch_size) ** 2 A_ : int = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images A_ : str = 1 A_ : Dict = TFViTMAEForPreTraining(_lowerCamelCase ) A_ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A_ : Optional[Any] = model(_lowerCamelCase , training=_lowerCamelCase ) A_ : Tuple = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def _a ( self : int ): """simple docstring""" A_ : Tuple = self.prepare_config_and_inputs() (A_) : List[str] = config_and_inputs A_ : Tuple = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ (a__, a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () _lowerCAmelCase = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {} _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False def _a ( self : Dict ): """simple docstring""" A_ : List[str] = TFViTMAEModelTester(self ) A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 ) def _a ( self : Tuple ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMAE does not use inputs_embeds''' ) def _a ( self : Any ): """simple docstring""" pass def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Tuple = model_class(_lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) A_ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCamelCase , tf.keras.layers.Layer ) ) def _a ( self : Any ): """simple docstring""" A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : int = model_class(_lowerCamelCase ) A_ : Any = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : List[Any] = [*signature.parameters.keys()] A_ : int = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def _a ( self : str ): """simple docstring""" A_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _a ( self : Any ): """simple docstring""" A_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" np.random.seed(2 ) A_ : str = self.model_tester.prepare_config_and_inputs_for_common() A_ : Any = int((config.image_size // 
config.patch_size) ** 2 ) A_ : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: A_ : List[str] = model_class(_lowerCamelCase ) A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) A_ : Tuple = model(_lowerCamelCase , noise=_lowerCamelCase ) A_ : List[str] = copy.deepcopy(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) A_ : Any = model(**_lowerCamelCase , noise=_lowerCamelCase ) A_ : Optional[Any] = outputs_dict[0].numpy() A_ : Optional[Any] = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def _a ( self : List[Any] ): """simple docstring""" np.random.seed(2 ) A_ : str = self.model_tester.prepare_config_and_inputs_for_common() A_ : str = int((config.image_size // config.patch_size) ** 2 ) A_ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(_lowerCamelCase : Optional[Any] ): A_ : Any = {} for k, v in inputs_dict.items(): if tf.is_tensor(_lowerCamelCase ): A_ : Dict = v.numpy() else: A_ : Union[str, Any] = np.array(_lowerCamelCase ) return inputs_np_dict for model_class in self.all_model_classes: A_ : List[str] = model_class(_lowerCamelCase ) A_ : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) A_ : int = prepare_numpy_arrays(_lowerCamelCase ) A_ : str = model(_lowerCamelCase , noise=_lowerCamelCase ) A_ : Union[str, Any] = model(**_lowerCamelCase , noise=_lowerCamelCase ) self.assert_outputs_same(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[str] , _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ): """simple docstring""" np.random.seed(2 ) A_ : List[Any] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) A_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) A_ : List[Any] = tf.constant(_lowerCamelCase ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument A_ : str = tf_noise super().check_pt_tf_models(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" np.random.seed(2 ) A_ : int = self.model_tester.prepare_config_and_inputs_for_common() A_ : Dict = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(_lowerCamelCase ) if module_member_name.endswith('''MainLayer''' ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )] for module_member in (getattr(_lowerCamelCase , _lowerCamelCase ),) if isinstance(_lowerCamelCase , _lowerCamelCase ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(_lowerCamelCase , '''_keras_serializable''' , _lowerCamelCase ) } A_ : List[Any] = int((config.image_size // config.patch_size) ** 2 ) A_ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) A_ : Any = tf.convert_to_tensor(_lowerCamelCase ) inputs_dict.update({'''noise''': noise} ) for main_layer_class in tf_main_layer_classes: A_ : Optional[Any] = main_layer_class(_lowerCamelCase ) A_ : List[Any] = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } A_ : Tuple = tf.keras.Model(_lowerCamelCase , outputs=main_layer(_lowerCamelCase ) ) A_ : Dict = model(_lowerCamelCase ) with tempfile.TemporaryDirectory() as tmpdirname: A_ : List[Any] = os.path.join(_lowerCamelCase , '''keras_model.h5''' ) model.save(_lowerCamelCase ) A_ : Any = tf.keras.models.load_model( _lowerCamelCase , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(_lowerCamelCase , tf.keras.Model ) A_ : Optional[int] = model(_lowerCamelCase ) self.assert_outputs_same(_lowerCamelCase , _lowerCamelCase ) @slow def _a ( self : str ): """simple docstring""" np.random.seed(2 ) A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A_ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 ) A_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: A_ : Any = model_class(_lowerCamelCase ) A_ : List[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[Any] = model(_lowerCamelCase , noise=_lowerCamelCase ) if model_class.__name__ == "TFViTMAEModel": A_ : Dict = outputs.last_hidden_state.numpy() A_ : List[Any] = 0 else: A_ : Any = outputs.logits.numpy() A_ : List[Any] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_lowerCamelCase , saved_model=_lowerCamelCase ) A_ : List[Any] = model_class.from_pretrained(_lowerCamelCase ) A_ : Any = model(_lowerCamelCase , noise=_lowerCamelCase ) if model_class.__name__ == "TFViTMAEModel": A_ : List[Any] = after_outputs['''last_hidden_state'''].numpy() A_ : Optional[Any] = 0 else: A_ : Dict = after_outputs['''logits'''].numpy() A_ : Dict = 0 A_ : List[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCamelCase , 1E-5 ) def _a ( self : Optional[Any] ): """simple docstring""" np.random.seed(2 ) A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() A_ : int = int((config.image_size // config.patch_size) ** 2 ) A_ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: A_ : List[Any] = model_class(_lowerCamelCase ) A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) A_ : str = model(_lowerCamelCase , noise=_lowerCamelCase ) A_ : Optional[Any] = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(_lowerCamelCase ) A_ : Optional[int] = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config A_ : Any = model_class.from_config(model.config ) A_ : int = new_model(_lowerCamelCase ) # Build model new_model.set_weights(model.get_weights() ) A_ : Dict = 
new_model(_lowerCamelCase , noise=_lowerCamelCase ) self.assert_outputs_same(_lowerCamelCase , _lowerCamelCase ) @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' ) def _a ( self : List[str] ): """simple docstring""" pass @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' ) def _a ( self : List[Any] ): """simple docstring""" pass @slow def _a ( self : Optional[Any] ): """simple docstring""" A_ : Tuple = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(_lowerCamelCase ) def snake_case__ ( ) -> List[str]: A_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : int ): """simple docstring""" return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None @slow def _a ( self : Optional[Any] ): """simple docstring""" np.random.seed(2 ) A_ : List[str] = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ) A_ : str = self.default_image_processor A_ : Dict = prepare_img() A_ : Union[str, Any] = image_processor(images=_lowerCamelCase , return_tensors='''tf''' ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) A_ : Tuple = ViTMAEConfig() A_ : List[str] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) A_ : Optional[Any] = np.random.uniform(size=(1, num_patches) ) # forward pass A_ : Union[str, Any] = model(**_lowerCamelCase , noise=_lowerCamelCase ) # verify the logits A_ : Dict = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) A_ : Dict = tf.convert_to_tensor( [[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , _lowerCamelCase , atol=1E-4 )
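# --- Running the suite above (sketch) ---
# Assumption: this file lives at tests/models/vit_mae/test_modeling_tf_vit_mae.py
# in a transformers checkout with the TensorFlow extras installed.
#
#   python -m pytest tests/models/vit_mae/test_modeling_tf_vit_mae.py -k "save_load"
#
# Setting RUN_SLOW=1 additionally enables the @slow integration test against the
# facebook/vit-mae-base checkpoint.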
'''simple docstring''' import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 snake_case__ = sys.version_info >= (3, 10) def snake_case__ ( lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : str=None ) -> List[Any]: return field(default_factory=lambda: default , metadata=lowerCamelCase__ ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 4_2 _lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = None class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'titi' _lowerCAmelCase = 'toto' class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'titi' _lowerCAmelCase = 'toto' _lowerCAmelCase = 4_2 @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" def _a ( self : Optional[Any] ): """simple docstring""" A_ : Optional[int] = BasicEnum(self.foo ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" def _a ( self : Tuple ): """simple docstring""" A_ : Optional[Any] = MixedTypeEnum(self.foo ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} ) _lowerCAmelCase = None _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[] ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[1, 2, 3] ) _lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) _lowerCAmelCase = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = field() _lowerCAmelCase = field() _lowerCAmelCase = field() def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = BasicEnum(self.required_enum ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = field() _lowerCAmelCase = None _lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} ) _lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) if is_python_no_less_than_3_10: @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = None @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} ) _lowerCAmelCase = None _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[] ) class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] , _lowerCamelCase : argparse.ArgumentParser , _lowerCamelCase : argparse.ArgumentParser ): """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): A_ : Union[str, 
Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''} A_ : Optional[Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , _lowerCamelCase ) and yy.get('''choices''' , _lowerCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](_lowerCamelCase ) , yy['''type'''](_lowerCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Optional[int] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : Optional[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--bar''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--baz''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--flag''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Union[str, Any] = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((A_) ,) : List[str] = parser.parse_args_into_dataclasses(_lowerCamelCase , look_for_args_file=_lowerCamelCase ) self.assertFalse(example.flag ) def _a ( self : Dict ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : int = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=42 , type=_lowerCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" A_ : Any = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) expected.add_argument('''--baz''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=_lowerCamelCase , dest='''baz''' ) expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase ) A_ : Dict = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(_lowerCamelCase ) for dataclass_type in dataclass_types: A_ : Any = HfArgumentParser(_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = parser.parse_args([] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : Optional[int] = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : Union[str, Any] = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : List[str] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : List[Any] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', 
'''--opt''', '''False'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) def _a ( self : List[Any] ): """simple docstring""" A_ : str = HfArgumentParser(_lowerCamelCase ) A_ : Optional[int] = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : str = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) A_ : List[Any] = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) A_ : int = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) A_ : Dict = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) A_ : Tuple = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) A_ : List[str] = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def _a ( self : Optional[int] ): """simple docstring""" @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" A_ : List[str] = HfArgumentParser(_lowerCamelCase ) A_ : Tuple = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Tuple = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) A_ : List[str] = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) A_ : int = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) def _a ( self : Dict ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : List[Any] = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_lowerCamelCase ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[int] = parser.parse_args([] ) self.assertEqual( _lowerCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) A_ : str = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(_lowerCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def _a ( self : Dict ): """simple docstring""" A_ : Optional[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=_lowerCamelCase , type=_lowerCamelCase ) expected.add_argument('''--bar''' , default=_lowerCamelCase , type=_lowerCamelCase , help='''help message''' ) expected.add_argument('''--baz''' , default=_lowerCamelCase , type=_lowerCamelCase ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) A_ : Tuple = [OptionalExample] if 
is_python_no_less_than_3_10: dataclass_types.append(_lowerCamelCase ) for dataclass_type in dataclass_types: A_ : int = HfArgumentParser(_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = parser.parse_args([] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , bar=_lowerCamelCase , baz=_lowerCamelCase , ces=[] , des=[] ) ) A_ : Optional[Any] = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(_lowerCamelCase , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[Any] = HfArgumentParser(_lowerCamelCase ) A_ : Dict = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--required_str''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : List[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , ) expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : List[Any] = HfArgumentParser(_lowerCamelCase ) A_ : Union[str, Any] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } A_ : Optional[int] = parser.parse_dict(_lowerCamelCase )[0] A_ : str = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[str] ): """simple docstring""" A_ : Any = HfArgumentParser(_lowerCamelCase ) A_ : List[str] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, '''extra''': 42, } self.assertRaises(_lowerCamelCase , parser.parse_dict , _lowerCamelCase , allow_extra_keys=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : List[str] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: A_ : Tuple = os.path.join(_lowerCamelCase , '''temp_json''' ) os.mkdir(_lowerCamelCase ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(_lowerCamelCase , _lowerCamelCase ) A_ : List[str] = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] A_ : Optional[Any] = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : Tuple = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: A_ : int = 
os.path.join(_lowerCamelCase , '''temp_yaml''' ) os.mkdir(_lowerCamelCase ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] A_ : int = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Dict = HfArgumentParser(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase )
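# --- Usage sketch ---
# A minimal, self-contained example of the pattern the tests above exercise:
# dataclass fields become argparse options, with metadata["help"] as help text.
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class Args:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


parser = HfArgumentParser(Args)
(args,) = parser.parse_args_into_dataclasses(["--foo", "1"], look_for_args_file=False)
print(args.foo, args.baz)  # 1 toto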
from __future__ import annotations

COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """
    Apply Coulomb's law: exactly one of the four arguments must be 0, and the
    function solves for that quantity from the other three.
    """
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
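# --- Usage sketch ---
# Pass 0 for the unknown quantity: two 1 C charges held 1 m apart.
print(coulombs_law(force=0, charge1=1, charge2=1, distance=1))
# {'force': 8988000000.0}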
from collections import UserDict
from typing import Union

import numpy as np
import requests

from ..utils import (
    add_end_docstrings,
    logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline


logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
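# --- Usage sketch ---
# Upstream, the pipeline above is registered under the task name
# "zero-shot-audio-classification". "laion/clap-htsat-unfused" is a published
# CLAP checkpoint; the audio path is a placeholder.
from transformers import pipeline

classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
preds = classifier("/path/to/clip.wav", candidate_labels=["dog barking", "vacuum cleaner"])
print(preds[0]["label"], preds[0]["score"])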
"""simple docstring""" def __UpperCAmelCase ( UpperCAmelCase_ : int = 2_00_00_00 ) -> int: '''simple docstring''' __snake_case : int = [0 for i in range(n + 1 )] __snake_case : int = 1 __snake_case : str = 1 for i in range(2 , int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i , n + 1 , UpperCAmelCase_ ): __snake_case : Optional[int] = 1 __snake_case : str = 0 for i in range(UpperCAmelCase_ ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(f'''{solution() = }''')
"""simple docstring""" from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def __UpperCAmelCase ( UpperCAmelCase_ : Namespace ) -> Union[str, Any]: '''simple docstring''' return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) _a : str= "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n" class UpperCamelCase ( lowercase ): @staticmethod def _lowercase (_A : ArgumentParser) -> Tuple: __snake_case : Optional[Any] = parser.add_parser( 'convert' , help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.' , ) train_parser.add_argument('--model_type' , type=_A , required=_A , help='Model\'s type.') train_parser.add_argument( '--tf_checkpoint' , type=_A , required=_A , help='TensorFlow checkpoint path or folder.') train_parser.add_argument( '--pytorch_dump_output' , type=_A , required=_A , help='Path to the PyTorch saved model output.') train_parser.add_argument('--config' , type=_A , default='' , help='Configuration file path or folder.') train_parser.add_argument( '--finetuning_task_name' , type=_A , default=_A , help='Optional fine-tuning task name if the TF model was a finetuned model.' , ) train_parser.set_defaults(func=_A) def __init__(self : List[str] , _A : str , _A : str , _A : str , _A : str , _A : str , *_A : Any , ) -> Optional[Any]: __snake_case : List[Any] = logging.get_logger('transformers-cli/converting') self._logger.info(f"Loading model {model_type}") __snake_case : List[str] = model_type __snake_case : int = tf_checkpoint __snake_case : Optional[int] = pytorch_dump_output __snake_case : Optional[Any] = config __snake_case : Optional[Any] = finetuning_task_name def _lowercase (self : List[str]) -> str: if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_A) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_A) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_A) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(_A) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( 
convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_A) if "ckpt" in self._tf_checkpoint.lower(): __snake_case : Union[str, Any] = self._tf_checkpoint __snake_case : List[Any] = '' else: __snake_case : Optional[Any] = self._tf_checkpoint __snake_case : List[Any] = '' convert_transfo_xl_checkpoint_to_pytorch( _A , self._config , self._pytorch_dump_output , _A) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_A) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(_A) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) else: raise ValueError( '--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]')
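# Hedged usage sketch (not part of the original file): driving the command class
# directly instead of through `transformers-cli`. The positional argument order
# follows the __init__ above; every path below is a placeholder. Upstream, the
# registration staticmethod is `register_subcommand` and the entry point is `run`;
# both appear under obfuscated names in this file.
cmd = ConvertCommand(
    "bert",                    # model_type
    "/path/to/model.ckpt",     # tf_checkpoint (placeholder)
    "/path/to/pytorch_dump",   # pytorch_dump_output (placeholder)
    "",                        # config
    None,                      # finetuning_task_name
)
cmd.run()  # upstream method name (assumption)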
95
0
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    # `task` is kept in asdict() output even when left at its default value
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
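# Hedged usage sketch (not in the original file): pointing the template at a
# dataset whose raw text lives in a column other than "text".
lm_template = LanguageModeling(text_column="content")
print(lm_template.column_mapping)  # {"content": "text"}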
236
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) _SCREAMING_SNAKE_CASE = logging.getLogger(__name__) @dataclass class SCREAMING_SNAKE_CASE_ : __magic_name__: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) __magic_name__: Optional[str] = field( default=snake_case_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) __magic_name__: Optional[str] = field( default=snake_case_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) __magic_name__: Optional[str] = field( default=snake_case_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __magic_name__: bool = field(default=snake_case_ , metadata={"help": "Whether tp freeze the encoder."} ) __magic_name__: bool = field(default=snake_case_ , metadata={"help": "Whether to freeze the embeddings."} ) @dataclass class SCREAMING_SNAKE_CASE_ : __magic_name__: str = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) __magic_name__: Optional[str] = field( default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , ) __magic_name__: Optional[int] = field( default=1024 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __magic_name__: Optional[int] = field( default=128 , metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __magic_name__: Optional[int] = field( default=142 , metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) } , ) __magic_name__: Optional[int] = field( default=142 , metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} ) __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} ) __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# test examples. 
-1 means use all."} ) __magic_name__: Optional[str] = field(default=snake_case_ , metadata={"help": "Source language id for translation."} ) __magic_name__: Optional[str] = field(default=snake_case_ , metadata={"help": "Target language id for translation."} ) __magic_name__: Optional[int] = field(default=snake_case_ , metadata={"help": "# num_beams to use for evaluation."} ) __magic_name__: bool = field( default=snake_case_ , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a ): logger.info(f"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(f""" {key} = {metrics[key]}""" ) save_json(__a , os.path.join(__a , f"""{split}_results.json""" ) ) def SCREAMING_SNAKE_CASE__ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case_ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. snake_case_ ,snake_case_ ,snake_case_ : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: snake_case_ ,snake_case_ ,snake_case_ : List[str] = parser.parse_args_into_dataclasses() check_output_dir(__a ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('Training/evaluation parameters %s' , __a ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
snake_case_ : Tuple = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case_ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(__a , __a , __a ): assert hasattr(__a , __a ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(__a , __a , getattr(__a , __a ) ) snake_case_ : Tuple = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case_ : Any = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=__a , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__a , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: snake_case_ : Any = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__a , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__a , __a ): snake_case_ : int = tokenizer.lang_code_to_id[data_args.tgt_lang] else: snake_case_ : int = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__a ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) snake_case_ : List[Any] = SeqaSeqDataset # Get datasets snake_case_ : List[Any] = ( dataset_class( __a , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , ) if training_args.do_train else None ) snake_case_ : List[str] = ( dataset_class( __a , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) snake_case_ : List[Any] = ( dataset_class( __a , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , ) if training_args.do_predict else None ) # Initialize our Trainer snake_case_ : Any = ( build_compute_metrics_fn(data_args.task , __a ) if training_args.predict_with_generate else None ) snake_case_ : List[str] = SeqaSeqTrainer( model=__a , args=__a , data_args=__a , train_dataset=__a , eval_dataset=__a , data_collator=SeqaSeqDataCollator( __a , __a , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__a , tokenizer=__a , ) snake_case_ : Optional[int] = {} # Training if training_args.do_train: logger.info('*** Train ***' ) snake_case_ : Any = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) snake_case_ : Tuple = train_result.metrics snake_case_ : List[str] = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('train' , __a , training_args.output_dir ) all_metrics.update(__a ) # Need to save 
the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) snake_case_ : List[Any] = trainer.evaluate(metric_key_prefix='val' ) snake_case_ : str = data_args.n_val snake_case_ : Union[str, Any] = round(metrics['val_loss'] , 4 ) if trainer.is_world_process_zero(): handle_metrics('val' , __a , training_args.output_dir ) all_metrics.update(__a ) if training_args.do_predict: logger.info('*** Predict ***' ) snake_case_ : Dict = trainer.predict(test_dataset=__a , metric_key_prefix='test' ) snake_case_ : Union[str, Any] = test_output.metrics snake_case_ : int = data_args.n_test if trainer.is_world_process_zero(): snake_case_ : List[str] = round(metrics['test_loss'] , 4 ) handle_metrics('test' , __a , training_args.output_dir ) all_metrics.update(__a ) if training_args.predict_with_generate: snake_case_ : Any = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) snake_case_ : Any = lmap(str.strip , __a ) write_txt_file(__a , os.path.join(training_args.output_dir , 'test_generations.txt' ) ) if trainer.is_world_process_zero(): save_json(__a , os.path.join(training_args.output_dir , 'all_results.json' ) ) return all_metrics def SCREAMING_SNAKE_CASE__ ( __a ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
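# Hedged sketch (not part of the original script): how HfArgumentParser maps CLI
# flags onto the three dataclasses used above. All flag values are placeholders.
from transformers import HfArgumentParser

arg_parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments))
model_args, data_args, training_args = arg_parser.parse_args_into_dataclasses(
    ["--model_name_or_path", "t5-small", "--data_dir", "./data", "--output_dir", "./out", "--do_train"]
)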
327
0
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers import ( TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, BertConfig, DPRConfig, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) class __lowerCAmelCase : def __init__(self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=9_9 , lowerCAmelCase__=3_2 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=3_7 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=1_6 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , lowerCAmelCase__=0 , ): _UpperCAmelCase : Optional[int] = parent _UpperCAmelCase : Union[str, Any] = batch_size _UpperCAmelCase : Union[str, Any] = seq_length _UpperCAmelCase : List[Any] = is_training _UpperCAmelCase : Optional[Any] = use_input_mask _UpperCAmelCase : Tuple = use_token_type_ids _UpperCAmelCase : Dict = use_labels _UpperCAmelCase : Any = vocab_size _UpperCAmelCase : List[str] = hidden_size _UpperCAmelCase : Dict = num_hidden_layers _UpperCAmelCase : Tuple = num_attention_heads _UpperCAmelCase : Union[str, Any] = intermediate_size _UpperCAmelCase : Any = hidden_act _UpperCAmelCase : int = hidden_dropout_prob _UpperCAmelCase : Dict = attention_probs_dropout_prob _UpperCAmelCase : Union[str, Any] = max_position_embeddings _UpperCAmelCase : Tuple = type_vocab_size _UpperCAmelCase : Optional[Any] = type_sequence_label_size _UpperCAmelCase : Optional[Any] = initializer_range _UpperCAmelCase : str = num_labels _UpperCAmelCase : str = num_choices _UpperCAmelCase : Optional[Any] = scope _UpperCAmelCase : Optional[Any] = projection_dim def snake_case_ (self ): _UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase : Optional[int] = None if self.use_input_mask: # follow test_modeling_tf_ctrl.py _UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase : Dict = None if self.use_token_type_ids: _UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase : str = None _UpperCAmelCase : List[str] = None _UpperCAmelCase : Optional[int] = None if self.use_labels: _UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase : Union[str, Any] = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , ) _UpperCAmelCase : int = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCAmelCase : Any = TFDPRContextEncoder(config=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ ) _UpperCAmelCase : Dict = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ ) _UpperCAmelCase : List[str] = model(lowerCAmelCase__ ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCAmelCase : List[str] = TFDPRQuestionEncoder(config=lowerCAmelCase__ ) _UpperCAmelCase : int = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ ) _UpperCAmelCase : Dict = model(lowerCAmelCase__ ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCAmelCase : Dict = TFDPRReader(config=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) ) def snake_case_ (self ): _UpperCAmelCase : Dict = self.prepare_config_and_inputs() ( _UpperCAmelCase ) : str = config_and_inputs _UpperCAmelCase : Dict = {"""input_ids""": input_ids} return config, inputs_dict @require_tf class __lowerCAmelCase ( __a , __a , unittest.TestCase ): snake_case : Optional[int] = ( ( TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) if is_tf_available() else () ) snake_case : Tuple = {"""feature-extraction""": TFDPRQuestionEncoder} if is_tf_available() else {} snake_case : Optional[int] = False snake_case : Optional[int] = False snake_case : List[Any] = False snake_case : List[Any] = False snake_case : int = False def snake_case_ (self ): _UpperCAmelCase : Dict = TFDPRModelTester(self ) _UpperCAmelCase : List[Any] = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=3_7 ) def snake_case_ (self ): self.config_tester.run_common_tests() def snake_case_ (self ): _UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_context_encoder(*lowerCAmelCase__ ) def snake_case_ (self ): _UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_question_encoder(*lowerCAmelCase__ ) def snake_case_ (self ): _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_dpr_reader(*lowerCAmelCase__ ) @slow def snake_case_ (self ): for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase : str = TFDPRContextEncoder.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase : Union[str, Any] = TFDPRContextEncoder.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase : Dict = TFDPRQuestionEncoder.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase : Union[str, Any] = TFDPRReader.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) @require_tf class __lowerCAmelCase ( unittest.TestCase ): @slow def snake_case_ (self ): _UpperCAmelCase : int = TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""" ) _UpperCAmelCase : int = tf.constant( [[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP] _UpperCAmelCase : Tuple = model(lowerCAmelCase__ )[0] # embedding shape = (1, 768) # compare the actual values for a slice. _UpperCAmelCase : int = tf.constant( [ [ 0.0_3_2_3_6_2_5_3, 0.1_2_7_5_3_3_3_5, 0.1_6_8_1_8_5_0_9, 0.0_0_2_7_9_7_8_6, 0.3_8_9_6_9_3_3, 0.2_4_2_6_4_9_4_5, 0.2_1_7_8_9_7_1, -0.0_2_3_3_5_2_2_7, -0.0_8_4_8_1_9_5_9, -0.1_4_3_2_4_1_1_7, ] ] ) self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1e-4 ) )
368
'''simple docstring'''

import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

parser = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
args = parser.parse_args()

device = 'cpu'
prompt = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'generator': generator}
if args.steps is not None:
    generate_kwargs['num_inference_steps'] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save('generated.png')
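# Optional sanity check (hypothetical, not part of the original script): the
# bfloat16 path above only pays off when PyTorch's oneDNN backend is available.
if not torch.backends.mkldnn.is_available():
    print('oneDNN backend unavailable; the ipex bfloat16 optimization will not help on this machine.')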
170
0
'''simple docstring''' __snake_case =frozenset( [ """prompt""", """height""", """width""", """guidance_scale""", """negative_prompt""", """prompt_embeds""", """negative_prompt_embeds""", """cross_attention_kwargs""", ] ) __snake_case =frozenset(["""prompt""", """negative_prompt"""]) __snake_case =frozenset([]) __snake_case =frozenset(["""image"""]) __snake_case =frozenset( [ """image""", """height""", """width""", """guidance_scale""", ] ) __snake_case =frozenset(["""image"""]) __snake_case =frozenset( [ """prompt""", """image""", """height""", """width""", """guidance_scale""", """negative_prompt""", """prompt_embeds""", """negative_prompt_embeds""", ] ) __snake_case =frozenset(["""prompt""", """image""", """negative_prompt"""]) __snake_case =frozenset( [ # Text guided image variation with an image mask """prompt""", """image""", """mask_image""", """height""", """width""", """guidance_scale""", """negative_prompt""", """prompt_embeds""", """negative_prompt_embeds""", ] ) __snake_case =frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""]) __snake_case =frozenset( [ # image variation with an image mask """image""", """mask_image""", """height""", """width""", """guidance_scale""", ] ) __snake_case =frozenset(["""image""", """mask_image"""]) __snake_case =frozenset( [ """example_image""", """image""", """mask_image""", """height""", """width""", """guidance_scale""", ] ) __snake_case =frozenset(["""example_image""", """image""", """mask_image"""]) __snake_case =frozenset(["""class_labels"""]) __snake_case =frozenset(["""class_labels"""]) __snake_case =frozenset(["""batch_size"""]) __snake_case =frozenset([]) __snake_case =frozenset(["""batch_size"""]) __snake_case =frozenset([]) __snake_case =frozenset( [ """prompt""", """audio_length_in_s""", """guidance_scale""", """negative_prompt""", """prompt_embeds""", """negative_prompt_embeds""", """cross_attention_kwargs""", ] ) __snake_case =frozenset(["""prompt""", """negative_prompt"""]) __snake_case =frozenset(["""input_tokens"""]) __snake_case =frozenset(["""input_tokens"""])
4
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer __snake_case =logging.get_logger(__name__) __snake_case ={ """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } __snake_case ={ """vocab_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json""" }, """merges_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt""" }, """tokenizer_config_file""": { """facebook/blenderbot_small-90M""": ( """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json""" ) }, } __snake_case ={ """facebook/blenderbot_small-90M""": 512, } class UpperCAmelCase_ ( __lowercase ): lowerCamelCase : Tuple = VOCAB_FILES_NAMES lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : Optional[Any] = BlenderbotSmallTokenizer def __init__( self : Any , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int="<|endoftext|>" , UpperCAmelCase__ : Dict="<|endoftext|>" , UpperCAmelCase__ : str="<|endoftext|>" , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Tuple=True , **UpperCAmelCase__ : Optional[Any] , ) -> Any: super().__init__( ByteLevelBPETokenizer( vocab=UpperCAmelCase__ , merges=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ , ) , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , **UpperCAmelCase__ , ) lowerCAmelCase = add_prefix_space def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict=None ) -> Any: lowerCAmelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]: lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
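# Hedged usage sketch (not part of the original file), using the slow tokenizer
# already imported above; the hub id comes from this module's constants.
tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
print(tok("sample text").input_ids)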
4
1
"""simple docstring""" def lowercase_ ( _lowerCamelCase : int = 10 , _lowerCamelCase : int = 1000 , _lowerCamelCase : bool = True): assert ( isinstance(_lowerCamelCase , _lowerCamelCase) and isinstance(_lowerCamelCase , _lowerCamelCase) and isinstance(_lowerCamelCase , _lowerCamelCase) ), "Invalid type of value(s) specified to function!" if min_val > max_val: raise ValueError("Invalid value for min_val or max_val (min_value < max_value)") return min_val if option else max_val def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int): return int((number_a + number_a) / 2) def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int): assert ( isinstance(_lowerCamelCase , _lowerCamelCase) and isinstance(_lowerCamelCase , _lowerCamelCase) and isinstance(_lowerCamelCase , _lowerCamelCase) ), 'argument values must be type of "int"' if lower > higher: raise ValueError("argument value for lower and higher must be(lower > higher)") if not lower < to_guess < higher: raise ValueError( "guess value must be within the range of lower and higher value") def answer(_lowerCamelCase : int) -> str: if number > to_guess: return "high" elif number < to_guess: return "low" else: return "same" print("started...") lowercase__ : Optional[int] = lower lowercase__ : List[Any] = higher lowercase__ : Dict = [] while True: lowercase__ : Any = get_avg(_lowerCamelCase , _lowerCamelCase) last_numbers.append(_lowerCamelCase) if answer(_lowerCamelCase) == "low": lowercase__ : List[str] = number elif answer(_lowerCamelCase) == "high": lowercase__ : Optional[int] = number else: break print(f'''guess the number : {last_numbers[-1]}''') print(f'''details : {last_numbers!s}''') def lowercase_ ( ): lowercase__ : Tuple = int(input("Enter lower value : ").strip()) lowercase__ : Optional[int] = int(input("Enter high value : ").strip()) lowercase__ : Optional[Any] = int(input("Enter value to guess : ").strip()) guess_the_number(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) if __name__ == "__main__": main()
371
import argparse
import datetime


def zeller(date_input: str) -> str:
    """Return the day of the week for a date given as 'mm-dd-yyyy' or 'mm/dd/yyyy'."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
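# Example check (not in the original file): 01-01-2000 was a Saturday, and the
# function cross-validates the congruence against datetime before returning.
assert zeller("01-01-2000") == "Your date 01-01-2000, is a Saturday!"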
333
0
import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
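# Spot check (not in the original file): a partition is "perfect" when
# sqrt(4*p + 1)/2 + 1/2 is an exact power of two; p = 2 gives 2**1, p = 3 does not.
assert check_partition_perfect(2)
assert not check_partition_perfect(3)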
201
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
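# Example checks (not in the original file): the check letter is
# LOOKUP_LETTERS[number % 23]; 12345678 % 23 == 14, and LOOKUP_LETTERS[14] == "Z".
assert is_spain_national_id("12345678Z")
assert not is_spain_national_id("12345678A")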
201
1
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    """Placeholder that raises a helpful error when `note_seq` is not installed."""

    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
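# Illustration (hypothetical, not part of the original file): any attempt to use
# the placeholder surfaces an install hint instead of a bare AttributeError.
try:
    MidiProcessor()
except ImportError as err:  # requires_backends raises when note_seq is missing
    print(err)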
369
import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCamelCase : """simple docstring""" def __init__( self : List[Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : List[Any]=1_3, _UpperCAmelCase : Optional[Any]=3_0, _UpperCAmelCase : List[str]=2, _UpperCAmelCase : str=3, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : Optional[Any]=3_2, _UpperCAmelCase : Any=5, _UpperCAmelCase : Optional[Any]=4, _UpperCAmelCase : List[Any]=3_7, _UpperCAmelCase : Optional[int]="gelu", _UpperCAmelCase : int=0.1, _UpperCAmelCase : List[str]=0.1, _UpperCAmelCase : List[str]=1_0, _UpperCAmelCase : List[Any]=0.02, _UpperCAmelCase : List[Any]=None, ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = parent SCREAMING_SNAKE_CASE__ : Optional[Any] = batch_size SCREAMING_SNAKE_CASE__ : str = image_size SCREAMING_SNAKE_CASE__ : Optional[int] = patch_size SCREAMING_SNAKE_CASE__ : Optional[int] = num_channels SCREAMING_SNAKE_CASE__ : List[str] = is_training SCREAMING_SNAKE_CASE__ : Any = use_labels SCREAMING_SNAKE_CASE__ : List[Any] = hidden_size SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE__ : str = num_attention_heads SCREAMING_SNAKE_CASE__ : str = intermediate_size SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Any = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Any = type_sequence_label_size SCREAMING_SNAKE_CASE__ : Any = initializer_range SCREAMING_SNAKE_CASE__ : Any = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) SCREAMING_SNAKE_CASE__ : str = (image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE__ : str = num_patches + 1 def A_ ( self : Any ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : List[str] = None if self.use_labels: SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size], self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : int = self.get_config() return config, pixel_values, labels def A_ ( self : int ) -> Tuple: """simple docstring""" return ViTMSNConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, ) def A_ ( self : Dict, _UpperCAmelCase : List[str], _UpperCAmelCase : List[Any], _UpperCAmelCase : List[Any] ) -> str: """simple docstring""" 
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ViTMSNModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self : int, _UpperCAmelCase : Dict, _UpperCAmelCase : List[Any], _UpperCAmelCase : Union[str, Any] ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.type_sequence_label_size SCREAMING_SNAKE_CASE__ : Tuple = ViTMSNForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() SCREAMING_SNAKE_CASE__ : int = model(_UpperCAmelCase, labels=_UpperCAmelCase ) print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" ) print("Labels: {labels}" ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) # test greyscale images SCREAMING_SNAKE_CASE__ : Optional[Any] = 1 SCREAMING_SNAKE_CASE__ : Union[str, Any] = ViTMSNForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : List[str] = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def A_ ( self : str ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Tuple = config_and_inputs SCREAMING_SNAKE_CASE__ : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () UpperCAmelCase_ = ( {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification} if is_torch_available() else {} ) UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = False def A_ ( self : Any ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = ViTMSNModelTester(self ) SCREAMING_SNAKE_CASE__ : str = ConfigTester(self, config_class=_UpperCAmelCase, has_text_modality=_UpperCAmelCase, hidden_size=3_7 ) def A_ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="ViTMSN does not use inputs_embeds" ) def A_ ( self : List[str] ) -> Tuple: """simple docstring""" pass def A_ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class(_UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) SCREAMING_SNAKE_CASE__ : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase, nn.Linear ) ) def A_ ( self : List[Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : int = model_class(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so 
arg_names order is deterministic SCREAMING_SNAKE_CASE__ : int = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : str = ["pixel_values"] self.assertListEqual(arg_names[:1], _UpperCAmelCase ) def A_ ( self : Optional[int] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def A_ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) @slow def A_ ( self : Optional[int] ) -> List[Any]: """simple docstring""" for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Optional[Any] = ViTMSNModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def _a ( ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCamelCase (unittest.TestCase ): """simple docstring""" @cached_property def A_ ( self : List[str] ) -> Optional[Any]: """simple docstring""" return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None @slow def A_ ( self : Any ) -> Dict: """simple docstring""" torch.manual_seed(2 ) SCREAMING_SNAKE_CASE__ : List[str] = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Dict = self.default_image_processor SCREAMING_SNAKE_CASE__ : List[Any] = prepare_img() SCREAMING_SNAKE_CASE__ : Dict = image_processor(images=_UpperCAmelCase, return_tensors="pt" ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Dict = model(**_UpperCAmelCase ) # verify the logits SCREAMING_SNAKE_CASE__ : Tuple = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : int = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3], _UpperCAmelCase, atol=1E-4 ) )
191
0
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case_ = logging.get_logger(__name__) snake_case_ = { """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = """sew-d""" def __init__( self :int , lowercase_ :Tuple=32 , lowercase_ :Any=7_68 , lowercase_ :Dict=12 , lowercase_ :Optional[Any]=12 , lowercase_ :Optional[int]=30_72 , lowercase_ :Any=2 , lowercase_ :int=5_12 , lowercase_ :Optional[Any]=2_56 , lowercase_ :Tuple=True , lowercase_ :Union[str, Any]=True , lowercase_ :List[Any]=("p2c", "c2p") , lowercase_ :int="layer_norm" , lowercase_ :Any="gelu_python" , lowercase_ :Union[str, Any]=0.1 , lowercase_ :Any=0.1 , lowercase_ :int=0.1 , lowercase_ :List[str]=0.0 , lowercase_ :Dict=0.1 , lowercase_ :int=0.02 , lowercase_ :List[str]=1E-7 , lowercase_ :Dict=1E-5 , lowercase_ :List[str]="group" , lowercase_ :Any="gelu" , lowercase_ :List[str]=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , lowercase_ :List[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase_ :Optional[int]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase_ :str=False , lowercase_ :Tuple=1_28 , lowercase_ :Union[str, Any]=16 , lowercase_ :Any=True , lowercase_ :int=0.05 , lowercase_ :Union[str, Any]=10 , lowercase_ :List[str]=2 , lowercase_ :Any=0.0 , lowercase_ :Tuple=10 , lowercase_ :List[str]=0 , lowercase_ :str="mean" , lowercase_ :Optional[int]=False , lowercase_ :str=False , lowercase_ :int=2_56 , lowercase_ :Optional[int]=0 , lowercase_ :List[str]=1 , lowercase_ :List[Any]=2 , **lowercase_ :Dict , ) -> Optional[Any]: super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ ) UpperCAmelCase = hidden_size UpperCAmelCase = feat_extract_norm UpperCAmelCase = feat_extract_activation UpperCAmelCase = list(lowercase_ ) UpperCAmelCase = list(lowercase_ ) UpperCAmelCase = list(lowercase_ ) UpperCAmelCase = conv_bias UpperCAmelCase = num_conv_pos_embeddings UpperCAmelCase = num_conv_pos_embedding_groups UpperCAmelCase = len(self.conv_dim ) UpperCAmelCase = num_hidden_layers UpperCAmelCase = intermediate_size UpperCAmelCase = squeeze_factor UpperCAmelCase = max_position_embeddings UpperCAmelCase = position_buckets UpperCAmelCase = share_att_key UpperCAmelCase = relative_attention UpperCAmelCase = norm_rel_ebd UpperCAmelCase = list(lowercase_ ) UpperCAmelCase = hidden_act UpperCAmelCase = num_attention_heads UpperCAmelCase = hidden_dropout UpperCAmelCase = attention_dropout UpperCAmelCase = activation_dropout UpperCAmelCase = feat_proj_dropout UpperCAmelCase = final_dropout UpperCAmelCase = layer_norm_eps UpperCAmelCase = feature_layer_norm_eps UpperCAmelCase = initializer_range UpperCAmelCase = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect.' 
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,' f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCAmelCase = apply_spec_augment UpperCAmelCase = mask_time_prob UpperCAmelCase = mask_time_length UpperCAmelCase = mask_time_min_masks UpperCAmelCase = mask_feature_prob UpperCAmelCase = mask_feature_length UpperCAmelCase = mask_feature_min_masks # ctc loss UpperCAmelCase = ctc_loss_reduction UpperCAmelCase = ctc_zero_infinity # sequence classification UpperCAmelCase = use_weighted_layer_sum UpperCAmelCase = classifier_proj_size @property def UpperCAmelCase__ ( self :str ) -> Dict: return functools.reduce(operator.mul , self.conv_stride , 1 )
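# Worked example (not in the original file) of the stride-product property above:
# the conv_stride defaults multiply out to the model's overall downsampling factor.
import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
print(functools.reduce(operator.mul, conv_stride, 1))  # 320 input samples per output frame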
78
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A__ : List[str] = logging.get_logger(__name__) class __snake_case ( UpperCamelCase_ ,UpperCamelCase_ ): _a = '''maskformer-swin''' _a = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : Union[str, Any] , A_ : Dict=2_2_4 , A_ : Optional[Any]=4 , A_ : List[str]=3 , A_ : str=9_6 , A_ : Optional[Any]=[2, 2, 6, 2] , A_ : Tuple=[3, 6, 1_2, 2_4] , A_ : List[Any]=7 , A_ : List[Any]=4.0 , A_ : List[str]=True , A_ : Dict=0.0 , A_ : int=0.0 , A_ : str=0.1 , A_ : Optional[int]="gelu" , A_ : List[Any]=False , A_ : int=0.02 , A_ : int=1e-5 , A_ : Optional[int]=None , A_ : List[str]=None , **A_ : List[Any] , ): super().__init__(**A_) lowerCAmelCase_ : Dict = image_size lowerCAmelCase_ : Optional[Any] = patch_size lowerCAmelCase_ : Tuple = num_channels lowerCAmelCase_ : Any = embed_dim lowerCAmelCase_ : List[str] = depths lowerCAmelCase_ : Union[str, Any] = len(A_) lowerCAmelCase_ : List[str] = num_heads lowerCAmelCase_ : Dict = window_size lowerCAmelCase_ : Optional[int] = mlp_ratio lowerCAmelCase_ : Dict = qkv_bias lowerCAmelCase_ : str = hidden_dropout_prob lowerCAmelCase_ : List[str] = attention_probs_dropout_prob lowerCAmelCase_ : Optional[int] = drop_path_rate lowerCAmelCase_ : Any = hidden_act lowerCAmelCase_ : str = use_absolute_embeddings lowerCAmelCase_ : List[str] = layer_norm_eps lowerCAmelCase_ : int = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowerCAmelCase_ : str = int(embed_dim * 2 ** (len(A_) - 1)) lowerCAmelCase_ : Optional[Any] = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(A_) + 1)] lowerCAmelCase_ , lowerCAmelCase_ : int = get_aligned_output_features_output_indices( out_features=A_ , out_indices=A_ , stage_names=self.stage_names)
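# Worked example (not in the original file) of the hidden_size computed above:
# embed_dim * 2 ** (len(depths) - 1) with the defaults embed_dim=96, depths=[2, 2, 6, 2].
print(96 * 2 ** (4 - 1))  # 768: channel dim after the last Swin stage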
103
0
'''simple docstring'''

import argparse
import logging
import pickle
from collections import Counter

logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
    )
    parser.add_argument(
        '--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
    )
    parser.add_argument(
        '--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
    )
    parser.add_argument('--vocab_size', default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)

    logger.info('Counting occurrences for MLM.')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, 'wb') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
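# Hedged companion snippet (not part of the original script): loading the dump and
# turning counts into smoothed masking probabilities, roughly as the distillation
# trainer does; the 0.7 smoothing exponent is an assumption.
import numpy as np

with open("data/token_counts.bert-base-uncased.pickle", "rb") as fp:
    token_counts = pickle.load(fp)
token_probs = np.maximum(token_counts, 1) ** -0.7  # rarer tokens get masked more often
token_probs = token_probs / token_probs.sum()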
367
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(a ) class lowerCAmelCase__ ( a ): """simple docstring""" def __init__( self : Optional[Any] , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : str ) -> Any: """simple docstring""" super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Any=None ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = {} if prompt is not None: __SCREAMING_SNAKE_CASE = prompt if generate_kwargs is not None: __SCREAMING_SNAKE_CASE = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: __SCREAMING_SNAKE_CASE = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) __SCREAMING_SNAKE_CASE = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : int , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> int: """simple docstring""" return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = load_image(__SCREAMING_SNAKE_CASE ) if prompt is not None: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise ValueError( f'Received an invalid text input, got - {type(__SCREAMING_SNAKE_CASE )} - but expected a single string. 
' """Note also that one single text can be provided for conditional image to text generation.""" ) __SCREAMING_SNAKE_CASE = self.model.config.model_type if model_type == "git": __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = self.tokenizer(text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ).input_ids __SCREAMING_SNAKE_CASE = [self.tokenizer.cls_token_id] + input_ids __SCREAMING_SNAKE_CASE = torch.tensor(__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(__SCREAMING_SNAKE_CASE ) else: raise ValueError(f'Model type {model_type} does not support conditional text generation' ) else: __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: __SCREAMING_SNAKE_CASE = None return model_inputs def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any]=None ) -> List[str]: """simple docstring""" if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , __SCREAMING_SNAKE_CASE ) and all(x is None for x in model_inputs["""input_ids"""] ) ): __SCREAMING_SNAKE_CASE = None if generate_kwargs is None: __SCREAMING_SNAKE_CASE = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. __SCREAMING_SNAKE_CASE = model_inputs.pop(self.model.main_input_name ) __SCREAMING_SNAKE_CASE = self.model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) return model_outputs def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = [] for output_ids in model_outputs: __SCREAMING_SNAKE_CASE = { """generated_text""": self.tokenizer.decode( __SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , ) } records.append(__SCREAMING_SNAKE_CASE ) return records
331
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer a =logging.get_logger(__name__) a ={"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} a ={ """vocab_file""": { """bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""", """bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""", """bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""", """bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""", """bert-base-multilingual-uncased""": ( """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt""" ), """bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""", """bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""", """bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""", """bert-large-uncased-whole-word-masking""": ( """https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt""" ), """bert-large-cased-whole-word-masking""": ( """https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt""" ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( """https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt""" ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( """https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt""" ), """bert-base-cased-finetuned-mrpc""": ( """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt""" ), """bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""", """bert-base-german-dbmdz-uncased""": ( """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt""" ), """TurkuNLP/bert-base-finnish-cased-v1""": ( """https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt""" ), """TurkuNLP/bert-base-finnish-uncased-v1""": ( """https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt""" ), """wietsedv/bert-base-dutch-cased""": ( """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""", """bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""", """bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""", """bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""", """bert-base-multilingual-uncased""": ( """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json""" ), """bert-base-multilingual-cased""": ( """https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json""" ), """bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""", """bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""", """bert-large-uncased-whole-word-masking""": ( 
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json""" ), """bert-large-cased-whole-word-masking""": ( """https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json""" ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( """https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json""" ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( """https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json""" ), """bert-base-cased-finetuned-mrpc""": ( """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json""" ), """bert-base-german-dbmdz-cased""": ( """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json""" ), """bert-base-german-dbmdz-uncased""": ( """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json""" ), """TurkuNLP/bert-base-finnish-cased-v1""": ( """https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json""" ), """TurkuNLP/bert-base-finnish-uncased-v1""": ( """https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json""" ), """wietsedv/bert-base-dutch-cased""": ( """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json""" ), }, } a ={ """bert-base-uncased""": 512, """bert-large-uncased""": 512, """bert-base-cased""": 512, """bert-large-cased""": 512, """bert-base-multilingual-uncased""": 512, """bert-base-multilingual-cased""": 512, """bert-base-chinese""": 512, """bert-base-german-cased""": 512, """bert-large-uncased-whole-word-masking""": 512, """bert-large-cased-whole-word-masking""": 512, """bert-large-uncased-whole-word-masking-finetuned-squad""": 512, """bert-large-cased-whole-word-masking-finetuned-squad""": 512, """bert-base-cased-finetuned-mrpc""": 512, """bert-base-german-dbmdz-cased""": 512, """bert-base-german-dbmdz-uncased""": 512, """TurkuNLP/bert-base-finnish-cased-v1""": 512, """TurkuNLP/bert-base-finnish-uncased-v1""": 512, """wietsedv/bert-base-dutch-cased""": 512, } a ={ """bert-base-uncased""": {"""do_lower_case""": True}, """bert-large-uncased""": {"""do_lower_case""": True}, """bert-base-cased""": {"""do_lower_case""": False}, """bert-large-cased""": {"""do_lower_case""": False}, """bert-base-multilingual-uncased""": {"""do_lower_case""": True}, """bert-base-multilingual-cased""": {"""do_lower_case""": False}, """bert-base-chinese""": {"""do_lower_case""": False}, """bert-base-german-cased""": {"""do_lower_case""": False}, """bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True}, """bert-large-cased-whole-word-masking""": {"""do_lower_case""": False}, """bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True}, """bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False}, """bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False}, """bert-base-german-dbmdz-cased""": {"""do_lower_case""": False}, """bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True}, """TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False}, """TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True}, """wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False}, } class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES _UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase : List[str] = 
PRETRAINED_INIT_CONFIGURATION _UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase : str = BertTokenizer def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any]=None ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Optional[Any]="[UNK]" ,SCREAMING_SNAKE_CASE__ : List[str]="[SEP]" ,SCREAMING_SNAKE_CASE__ : str="[PAD]" ,SCREAMING_SNAKE_CASE__ : Optional[Any]="[CLS]" ,SCREAMING_SNAKE_CASE__ : str="[MASK]" ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : str=None ,**SCREAMING_SNAKE_CASE__ : List[Any] ,): super().__init__( SCREAMING_SNAKE_CASE__ ,tokenizer_file=SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,sep_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,cls_token=SCREAMING_SNAKE_CASE__ ,mask_token=SCREAMING_SNAKE_CASE__ ,tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ ,strip_accents=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,) __lowerCamelCase : str = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get('lowercase' ,SCREAMING_SNAKE_CASE__) != do_lower_case or normalizer_state.get('strip_accents' ,SCREAMING_SNAKE_CASE__) != strip_accents or normalizer_state.get('handle_chinese_chars' ,SCREAMING_SNAKE_CASE__) != tokenize_chinese_chars ): __lowerCamelCase : Any = getattr(SCREAMING_SNAKE_CASE__ ,normalizer_state.pop('type')) __lowerCamelCase : str = do_lower_case __lowerCamelCase : Any = strip_accents __lowerCamelCase : Any = tokenize_chinese_chars __lowerCamelCase : int = normalizer_class(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : Union[str, Any] = do_lower_case def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Tuple=None): __lowerCamelCase : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None): __lowerCamelCase : str = [self.sep_token_id] __lowerCamelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None): __lowerCamelCase : Optional[Any] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ ,name=SCREAMING_SNAKE_CASE__) return tuple(SCREAMING_SNAKE_CASE__)
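# The special-token assembly in the fast tokenizer above follows a fixed layout.
# A self-contained sketch (plain Python; BERT's conventional ids are used purely
# for illustration) of how the input ids and token_type_ids are built for single
# sentences and pairs:
CLS, SEP = 101, 102  # BERT's conventional special-token ids (illustrative)


def with_special_tokens(ids_a, ids_b=None):
    # Single sequence: [CLS] A [SEP]; pair: [CLS] A [SEP] B [SEP]
    out = [CLS] + ids_a + [SEP]
    if ids_b is not None:
        out += ids_b + [SEP]
    return out


def token_type_ids(ids_a, ids_b=None):
    # Segment 0 covers [CLS] A [SEP]; segment 1 covers B [SEP]
    first = [0] * (len(ids_a) + 2)
    if ids_b is None:
        return first
    return first + [1] * (len(ids_b) + 1)


assert with_special_tokens([7, 8], [9]) == [101, 7, 8, 102, 9, 102]
assert token_type_ids([7, 8], [9]) == [0, 0, 0, 0, 1, 1]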
73
from ...processing_utils import ProcessorMixin class _UpperCamelCase ( lowerCAmelCase ): UpperCAmelCase_ = ["""image_processor""", """feature_extractor"""] UpperCAmelCase_ = """TvltImageProcessor""" UpperCAmelCase_ = """TvltFeatureExtractor""" def __init__( self :List[str] , lowerCamelCase :Dict , lowerCamelCase :Tuple ) -> Any: super().__init__(image_processor=lowerCamelCase , feature_extractor=lowerCamelCase ) UpperCAmelCase__ = image_processor UpperCAmelCase__ = feature_extractor def __call__( self :Union[str, Any] , lowerCamelCase :List[str]=None , lowerCamelCase :int=None , lowerCamelCase :List[Any]=None , lowerCamelCase :Dict=None , lowerCamelCase :List[str]=False , lowerCamelCase :Optional[Any]=False , *lowerCamelCase :List[Any] , **lowerCamelCase :Dict , ) -> List[str]: if images is None and audio is None: raise ValueError("You need to specify either an `images` or `audio` input to process." ) UpperCAmelCase__ = None if images is not None: UpperCAmelCase__ = self.image_processor(lowerCamelCase , mask_pixel=lowerCamelCase , *lowerCamelCase , **lowerCamelCase ) if images_mixed is not None: UpperCAmelCase__ = self.image_processor(lowerCamelCase , is_mixed=lowerCamelCase , *lowerCamelCase , **lowerCamelCase ) if audio is not None: UpperCAmelCase__ = self.feature_extractor( lowerCamelCase , *lowerCamelCase , sampling_rate=lowerCamelCase , mask_audio=lowerCamelCase , **lowerCamelCase ) UpperCAmelCase__ = {} if audio is not None: output_dict.update(lowerCamelCase ) if images is not None: output_dict.update(lowerCamelCase ) if images_mixed_dict is not None: output_dict.update(lowerCamelCase ) return output_dict @property def UpperCAmelCase_ ( self :Dict ) -> Optional[Any]: UpperCAmelCase__ = self.image_processor.model_input_names UpperCAmelCase__ = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
169
0
import numpy as np


class Cell:
    """A single grid cell, compared by position and linked to its parent on the path."""

    def __init__(self) -> None:
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell) -> bool:
        return self.position == cell.position

    def showcell(self) -> None:
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)) -> None:
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self) -> None:
        print(self.w)

    def get_neighbours(self, cell):
        """Return the in-bounds neighbours of `cell`, each parented to it."""
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            # skip neighbours that were already expanded
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            # do not re-queue a cell for which an equal-or-better copy is already open
            if any(c == n and c.f <= n.f for c in _open):
                continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
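# The search loop above re-scans the whole open list with np.argmin on every
# step. A heap-keyed variant of the same idea, written as an independent sketch
# on bare coordinate tuples (same squared-Euclidean heuristic; the function name
# is hypothetical), keeps extraction at O(log n):
import heapq
from itertools import count


def astar_grid(start, goal, width=5, height=5):
    def h(p):  # squared Euclidean, matching the heuristic above
        return (p[0] - goal[0]) ** 2 + (p[1] - goal[1]) ** 2

    tie = count()  # tiebreaker so the heap never compares positions or parents
    open_heap = [(h(start), 0, next(tie), start, None)]
    parents, closed = {}, set()
    while open_heap:
        _, g, _, pos, parent = heapq.heappop(open_heap)
        if pos in closed:
            continue
        closed.add(pos)
        parents[pos] = parent
        if pos == goal:
            path = []
            while pos is not None:
                path.append(pos)
                pos = parents[pos]
            return path[::-1]
        for dx in (-1, 0, 1):
            for dy in (-1, 0, 1):
                nxt = (pos[0] + dx, pos[1] + dy)
                if (dx, dy) != (0, 0) and 0 <= nxt[0] < width and 0 <= nxt[1] < height and nxt not in closed:
                    heapq.heappush(open_heap, (g + 1 + h(nxt), g + 1, next(tie), nxt, pos))
    return []


print(astar_grid((0, 0), (4, 4)))  # the diagonal [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]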
185
from __future__ import annotations

import time
from collections.abc import Sequence
from random import randint

from matplotlib import pyplot as plt


def max_subarray(
    arr: Sequence[float], low: int, high: int
) -> tuple[int | None, int | None, float]:
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(
    arr: Sequence[float], low: int, mid: int, high: int
) -> tuple[int, int, float]:
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
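# For contrast with the O(n log n) divide-and-conquer above: when only the best
# sum (not the crossing structure) is needed, Kadane's linear scan gives the
# same value. A small sketch with a hand-checked example:
def kadane(arr):
    # Track the best suffix sum ending at each index and the global best.
    best = cur = arr[0]
    for x in arr[1:]:
        cur = max(x, cur + x)
        best = max(best, cur)
    return best


data = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
assert kadane(data) == 6  # the subarray [4, -1, 2, 1]
print(kadane(data))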
185
1
"""simple docstring""" import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class lowerCamelCase__ ( unittest.TestCase ): @parameterized.expand([(None,), ("foo.json",)] ) def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ): """simple docstring""" snake_case : int = GenerationConfig( do_sample=UpperCAmelCase__ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(UpperCAmelCase__ , config_name=UpperCAmelCase__ ) snake_case : Any = GenerationConfig.from_pretrained(UpperCAmelCase__ , config_name=UpperCAmelCase__ ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , UpperCAmelCase__ ) self.assertEqual(loaded_config.temperature , 0.7 ) self.assertEqual(loaded_config.length_penalty , 1.0 ) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50 ) self.assertEqual(loaded_config.max_length , 20 ) self.assertEqual(loaded_config.max_time , UpperCAmelCase__ ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Optional[int] = AutoConfig.from_pretrained("gpt2" ) snake_case : List[str] = GenerationConfig.from_model_config(UpperCAmelCase__ ) snake_case : Optional[Any] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(UpperCAmelCase__ , UpperCAmelCase__ ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id ) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Any = GenerationConfig() snake_case : List[str] = { "max_new_tokens": 1_024, "foo": "bar", } snake_case : int = copy.deepcopy(UpperCAmelCase__ ) snake_case : str = generation_config.update(**UpperCAmelCase__ ) # update_kwargs was not modified (no side effects) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1_024 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(UpperCAmelCase__ , {"foo": "bar"} ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : List[str] = GenerationConfig() snake_case : Union[str, Any] = "bar" with tempfile.TemporaryDirectory("test-generation-config" ) as tmp_dir: generation_config.save_pretrained(UpperCAmelCase__ ) snake_case : Optional[Any] = GenerationConfig.from_pretrained(UpperCAmelCase__ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , "bar" ) snake_case : Optional[Any] = GenerationConfig.from_model_config(UpperCAmelCase__ ) assert not hasattr(UpperCAmelCase__ , "foo" ) # no new kwargs should be initialized if from config def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Optional[Any] = GenerationConfig() self.assertEqual(default_config.temperature , 1.0 ) self.assertEqual(default_config.do_sample , UpperCAmelCase__ ) self.assertEqual(default_config.num_beams , 1 ) snake_case : Optional[Any] = GenerationConfig( 
do_sample=UpperCAmelCase__ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7 ) self.assertEqual(config.do_sample , UpperCAmelCase__ ) self.assertEqual(config.num_beams , 1 ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(UpperCAmelCase__ ) snake_case : Dict = GenerationConfig.from_pretrained(UpperCAmelCase__ , temperature=1.0 ) self.assertEqual(loaded_config.temperature , 1.0 ) self.assertEqual(loaded_config.do_sample , UpperCAmelCase__ ) self.assertEqual(loaded_config.num_beams , 1 ) # default value @is_staging_test class lowerCamelCase__ ( unittest.TestCase ): @classmethod def lowerCamelCase_ ( cls ): """simple docstring""" snake_case : List[Any] = TOKEN HfFolder.save_token(UpperCAmelCase__ ) @classmethod def lowerCamelCase_ ( cls ): """simple docstring""" try: delete_repo(token=cls._token , repo_id="test-generation-config" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org" ) except HTTPError: pass def lowerCamelCase_ ( self ): """simple docstring""" snake_case : List[Any] = GenerationConfig( do_sample=UpperCAmelCase__ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("test-generation-config" , use_auth_token=self._token ) snake_case : Optional[int] = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase__ , getattr(UpperCAmelCase__ , UpperCAmelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id="test-generation-config" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( UpperCAmelCase__ , repo_id="test-generation-config" , push_to_hub=UpperCAmelCase__ , use_auth_token=self._token ) snake_case : str = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase__ , getattr(UpperCAmelCase__ , UpperCAmelCase__ ) ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Union[str, Any] = GenerationConfig( do_sample=UpperCAmelCase__ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token ) snake_case : Optional[Any] = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase__ , getattr(UpperCAmelCase__ , UpperCAmelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( UpperCAmelCase__ , repo_id="valid_org/test-generation-config-org" , push_to_hub=UpperCAmelCase__ , use_auth_token=self._token ) snake_case : Dict = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(UpperCAmelCase__ , getattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
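# A round-trip sketch of the GenerationConfig API these tests exercise, using
# only calls that appear in the tests themselves (save_pretrained /
# from_pretrained / update):
import tempfile

from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7, max_new_tokens=64)

with tempfile.TemporaryDirectory() as tmp_dir:
    # Writes generation_config.json (a custom name can be given via config_name=...)
    config.save_pretrained(tmp_dir)
    reloaded = GenerationConfig.from_pretrained(tmp_dir)

assert reloaded.temperature == 0.7

# .update() applies known fields and hands back the kwargs it could not use.
unused = reloaded.update(temperature=1.0, not_a_real_field="x")
assert reloaded.temperature == 1.0
assert unused == {"not_a_real_field": "x"}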
148
'''simple docstring'''
from __future__ import annotations

from statistics import mean


def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time from burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # While processes remain uncompleted, every process whose arrival time has
    # passed and which still has remaining execution time is put into
    # ready_process; the shortest process in ready_process, target_process,
    # is executed next.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
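# A hand-verifiable check of the scheduler above on its first test case (all
# arrivals at 0, bursts [2, 5, 3, 7]): the shortest jobs finish first, so the
# completion order is P1, P3, P2, P4.
waiting = calculate_waitingtime([0, 0, 0, 0], [2, 5, 3, 7], 4)
# P1 waits 0; P3 waits 2 (behind P1); P2 waits 2 + 3 = 5; P4 waits 2 + 3 + 5 = 10.
assert waiting == [0, 5, 2, 10]
# Turnaround is burst + waiting per process.
assert calculate_turnaroundtime([2, 5, 3, 7], 4, waiting) == [2, 10, 5, 17]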
4
0
'''simple docstring''' import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=a ) class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} ) lowerCAmelCase__ = Features({"text": Value("string" )} ) lowerCAmelCase__ = Features({"labels": ClassLabel} ) lowerCAmelCase__ = "text" lowerCAmelCase__ = "labels" def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple ) -> Optional[int]: """simple docstring""" if self.label_column not in features: raise ValueError(f'Column {self.label_column} is not present in features.' ) if not isinstance(features[self.label_column] , __SCREAMING_SNAKE_CASE ): raise ValueError(f'Column {self.label_column} is not a ClassLabel.' ) __SCREAMING_SNAKE_CASE = copy.deepcopy(self ) __SCREAMING_SNAKE_CASE = self.label_schema.copy() __SCREAMING_SNAKE_CASE = features[self.label_column] __SCREAMING_SNAKE_CASE = label_schema return task_template @property def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict[str, str]: """simple docstring""" return { self.text_column: "text", self.label_column: "labels", }
369
'''simple docstring'''
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
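# The list lookup above can be replaced by a closed-form membership test:
# t is triangular iff 8t + 1 is a perfect square, because t = n(n + 1)/2
# implies 8t + 1 = (2n + 1)^2. A sketch:
from math import isqrt


def is_triangular(t: int) -> bool:
    # 8t + 1 must be a perfect square (its root is then 2n + 1).
    root = isqrt(8 * t + 1)
    return root * root == 8 * t + 1


assert all(is_triangular(n * (n + 1) // 2) for n in range(1, 101))
assert not is_triangular(2) and not is_triangular(5)
# "SKY" -> 19 + 11 + 25 = 55 = t(10), the worked example from Project Euler 42.
assert is_triangular(sum(ord(c) - 64 for c in "SKY"))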
331
0
def palindromic_string(input_string: str) -> str:
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length - 1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append the last character
    new_input_string += input_string[-1]

    # l and r store the start and end of the previously furthest-ending
    # palindromic substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of the palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find the corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this one
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
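# A brute-force cross-check for the linear-time routine above (using the
# `palindromic_string` name as restored; lengths are compared because several
# longest palindromes may tie):
def longest_palindrome_bruteforce(s: str) -> str:
    # O(n^3): try every substring, keep the longest palindrome.
    best = ""
    for i in range(len(s)):
        for j in range(i, len(s)):
            sub = s[i : j + 1]
            if sub == sub[::-1] and len(sub) > len(best):
                best = sub
    return best


for word in ["aba", "abacabad", "forgeeksskeegfor", "zzzz"]:
    assert len(palindromic_string(word)) == len(longest_palindrome_bruteforce(word))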
196
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
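# The standard library offers the same membership test without recursion; a
# sketch with `bisect_left`, cross-checked against the `binary_search` above:
from bisect import bisect_left


def binary_search_stdlib(a_list: list[int], item: int) -> bool:
    # bisect_left returns the leftmost insertion point; the item is present
    # iff that slot exists and already holds the item.
    i = bisect_left(a_list, item)
    return i < len(a_list) and a_list[i] == item


nums = [1, 3, 5, 7, 9]
for probe in range(11):
    assert binary_search_stdlib(nums, probe) == binary_search(nums, probe)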
196
1
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 a_ = get_tests_dir('fixtures') class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __magic_name__ ( self : List[Any] ) -> Union[str, Any]: # A mock response for an HTTP head request to emulate server down SCREAMING_SNAKE_CASE__ : List[str] =mock.Mock() SCREAMING_SNAKE_CASE__ : Any =5_00 SCREAMING_SNAKE_CASE__ : List[Any] ={} SCREAMING_SNAKE_CASE__ : Dict =HTTPError SCREAMING_SNAKE_CASE__ : Optional[Any] ={} # Download this model to make sure it's in the cache. SCREAMING_SNAKE_CASE__ : int =ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=__lowercase ) as mock_head: SCREAMING_SNAKE_CASE__ : int =ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # This check we did call the fake head request mock_head.assert_called() def __magic_name__ ( self : str ) -> Dict: # This test is for deprecated behavior and can be removed in v5 SCREAMING_SNAKE_CASE__ : str =ViTImageProcessor.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' ) def __magic_name__ ( self : Any ) -> Tuple: with self.assertRaises(__lowercase ): # config is in subfolder, the following should not work without specifying the subfolder SCREAMING_SNAKE_CASE__ : Optional[int] =AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' ) SCREAMING_SNAKE_CASE__ : List[Any] =AutoImageProcessor.from_pretrained( '''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' ) self.assertIsNotNone(__lowercase ) @is_staging_test class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @classmethod def __magic_name__ ( cls : List[str] ) -> str: SCREAMING_SNAKE_CASE__ : Any =TOKEN HfFolder.save_token(__lowercase ) @classmethod def __magic_name__ ( cls : Dict ) -> Any: try: delete_repo(token=cls._token , repo_id='''test-image-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' ) except HTTPError: pass def __magic_name__ ( self : List[str] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Optional[Any] =ViTImageProcessor.from_pretrained(__lowercase ) image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token ) SCREAMING_SNAKE_CASE__ : Any =ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(__lowercase , getattr(__lowercase , __lowercase ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __lowercase , repo_id='''test-image-processor''' , push_to_hub=__lowercase , use_auth_token=self._token ) SCREAMING_SNAKE_CASE__ : List[Any] 
=ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(__lowercase , getattr(__lowercase , __lowercase ) ) def __magic_name__ ( self : str ) -> List[str]: SCREAMING_SNAKE_CASE__ : Tuple =ViTImageProcessor.from_pretrained(__lowercase ) image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token ) SCREAMING_SNAKE_CASE__ : Optional[Any] =ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(__lowercase , getattr(__lowercase , __lowercase ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __lowercase , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=__lowercase , use_auth_token=self._token ) SCREAMING_SNAKE_CASE__ : int =ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(__lowercase , getattr(__lowercase , __lowercase ) ) def __magic_name__ ( self : Tuple ) -> Any: CustomImageProcessor.register_for_auto_class() SCREAMING_SNAKE_CASE__ : Tuple =CustomImageProcessor.from_pretrained(__lowercase ) image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =AutoImageProcessor.from_pretrained( F"{USER}/test-dynamic-image-processor" , trust_remote_code=__lowercase ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
222
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): snake_case_ = StableDiffusionSAGPipeline snake_case_ = TEXT_TO_IMAGE_PARAMS snake_case_ = TEXT_TO_IMAGE_BATCH_PARAMS snake_case_ = TEXT_TO_IMAGE_IMAGE_PARAMS snake_case_ = TEXT_TO_IMAGE_IMAGE_PARAMS snake_case_ = False def __magic_name__ ( self : str ) -> Union[str, Any]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Tuple =UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) SCREAMING_SNAKE_CASE__ : Optional[Any] =DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__lowercase , set_alpha_to_one=__lowercase , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] =AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) SCREAMING_SNAKE_CASE__ : Optional[Any] =CLIPTextModel(__lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) SCREAMING_SNAKE_CASE__ : List[Any] ={ '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __magic_name__ ( self : int , __lowercase : Union[str, Any] , __lowercase : Any=0 ) -> Optional[Any]: if str(__lowercase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE__ : Optional[int] =torch.manual_seed(__lowercase ) else: SCREAMING_SNAKE_CASE__ : Optional[int] =torch.Generator(device=__lowercase ).manual_seed(__lowercase ) SCREAMING_SNAKE_CASE__ : Dict ={ '''prompt''': '''.''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 1.0, '''sag_scale''': 1.0, '''output_type''': '''numpy''', } return inputs def __magic_name__ ( self : int ) -> str: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __magic_name__ ( self : int ) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : int ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : Any =StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] 
=sag_pipe.to(__lowercase ) sag_pipe.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] ='''.''' SCREAMING_SNAKE_CASE__ : Tuple =torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] =sag_pipe( [prompt] , generator=__lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' ) SCREAMING_SNAKE_CASE__ : int =output.images SCREAMING_SNAKE_CASE__ : int =image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) SCREAMING_SNAKE_CASE__ : str =np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def __magic_name__ ( self : List[Any] ) -> Any: SCREAMING_SNAKE_CASE__ : Tuple =StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) SCREAMING_SNAKE_CASE__ : List[Any] =sag_pipe.to(__lowercase ) sag_pipe.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : List[str] ='''.''' SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =sag_pipe( [prompt] , generator=__lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' ) SCREAMING_SNAKE_CASE__ : Tuple =output.images SCREAMING_SNAKE_CASE__ : Tuple =image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) SCREAMING_SNAKE_CASE__ : List[Any] =np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def __magic_name__ ( self : str ) -> Any: SCREAMING_SNAKE_CASE__ : Union[str, Any] =StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) SCREAMING_SNAKE_CASE__ : Optional[int] =sag_pipe.to(__lowercase ) sag_pipe.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] ='''.''' SCREAMING_SNAKE_CASE__ : Dict =torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Tuple =sag_pipe( [prompt] , width=7_68 , height=5_12 , generator=__lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , ) SCREAMING_SNAKE_CASE__ : Any =output.images assert image.shape == (1, 5_12, 7_68, 3)
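# A hedged usage sketch of the pipeline these tests cover, using the checkpoint
# id the tests themselves load; the prompt is illustrative and a CUDA device is
# assumed for practical runtimes.
import torch
from diffusers import StableDiffusionSAGPipeline

pipe = StableDiffusionSAGPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-base", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")

# sag_scale controls the Self-Attention Guidance strength; 0.0 disables it.
image = pipe(
    "a photo of an astronaut riding a horse",
    guidance_scale=7.5,
    sag_scale=0.75,
    num_inference_steps=20,
).images[0]
image.save("sag_sample.png")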
222
1
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Optional import pandas as pd import pyarrow as pa import datasets from datasets.table import table_cast @dataclass class _a ( datasets.BuilderConfig ): '''simple docstring''' A : Optional[datasets.Features] = None class _a ( datasets.ArrowBasedBuilder ): '''simple docstring''' A : Dict = PandasConfig def UpperCamelCase_ ( self ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def UpperCamelCase_ ( self, A ): '''simple docstring''' if not self.config.data_files: raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" ) SCREAMING_SNAKE_CASE : str = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__lowercase, (str, list, tuple) ): SCREAMING_SNAKE_CASE : Optional[int] = data_files if isinstance(__lowercase, __lowercase ): SCREAMING_SNAKE_CASE : List[Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive SCREAMING_SNAKE_CASE : str = [dl_manager.iter_files(__lowercase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files} )] SCREAMING_SNAKE_CASE : List[str] = [] for split_name, files in data_files.items(): if isinstance(__lowercase, __lowercase ): SCREAMING_SNAKE_CASE : Union[str, Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive SCREAMING_SNAKE_CASE : Dict = [dl_manager.iter_files(__lowercase ) for file in files] splits.append(datasets.SplitGenerator(name=__lowercase, gen_kwargs={'files': files} ) ) return splits def UpperCamelCase_ ( self, A ): '''simple docstring''' if self.config.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example SCREAMING_SNAKE_CASE : Tuple = table_cast(__lowercase, self.config.features.arrow_schema ) return pa_table def UpperCamelCase_ ( self, A ): '''simple docstring''' for i, file in enumerate(itertools.chain.from_iterable(__lowercase ) ): with open(__lowercase, 'rb' ) as f: SCREAMING_SNAKE_CASE : str = pa.Table.from_pandas(pd.read_pickle(__lowercase ) ) yield i, self._cast_table(__lowercase )
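# How the builder above is normally reached from user code: the packaged
# "pandas" loader in `datasets` ingests DataFrames serialized with to_pickle,
# as its _generate_tables shows. A sketch (file and column names are
# illustrative):
import pandas as pd
from datasets import load_dataset

# The packaged "pandas" builder reads pickled DataFrames via pd.read_pickle.
pd.DataFrame({"text": ["a", "b"], "label": [0, 1]}).to_pickle("train.pkl")

ds = load_dataset("pandas", data_files={"train": "train.pkl"})
print(ds["train"][0])  # {'text': 'a', 'label': 0}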
251
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers


# silence TensorFlow's C++ logging before it is imported below
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
170
0
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: __A = None __A = logging.get_logger(__name__) __A = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} __A = { '''vocab_file''': { '''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/spiece.model''', '''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json''', '''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json''', }, } __A = { '''google/fnet-base''': 512, '''google/fnet-large''': 512, } __A = '''▁''' class lowercase ( snake_case__): """simple docstring""" a__ : Any = VOCAB_FILES_NAMES a__ : Dict = PRETRAINED_VOCAB_FILES_MAP a__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : List[Any] = ["input_ids", "token_type_ids"] a__ : List[str] = FNetTokenizer def __init__( self : Optional[int] , __UpperCAmelCase : str=None , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : str=True , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : List[str]="<unk>" , __UpperCAmelCase : Any="[SEP]" , __UpperCAmelCase : int="<pad>" , __UpperCAmelCase : Any="[CLS]" , __UpperCAmelCase : Optional[Any]="[MASK]" , **__UpperCAmelCase : int , ) -> Optional[int]: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
UpperCAmelCase_= ( AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase , normalized=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token ) super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , ) UpperCAmelCase_= do_lower_case UpperCAmelCase_= remove_space UpperCAmelCase_= keep_accents UpperCAmelCase_= vocab_file UpperCAmelCase_= False if not self.vocab_file else True def _SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]: UpperCAmelCase_= [self.sep_token_id] UpperCAmelCase_= [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _SCREAMING_SNAKE_CASE ( self : str , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]: UpperCAmelCase_= [self.sep_token_id] UpperCAmelCase_= [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCAmelCase_= os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ): copyfile(self.vocab_file , __UpperCAmelCase ) return (out_vocab_file,)
277
import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal __A = datasets.utils.logging.get_logger(__name__) __A = ['''names''', '''prefix'''] __A = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] __A = ['''encoding_errors''', '''on_bad_lines'''] __A = ['''date_format'''] @dataclass class lowercase ( datasets.BuilderConfig): """simple docstring""" a__ : str = "," a__ : Optional[str] = None a__ : Optional[Union[int, List[int], str]] = "infer" a__ : Optional[List[str]] = None a__ : Optional[List[str]] = None a__ : Optional[Union[int, str, List[int], List[str]]] = None a__ : Optional[Union[List[int], List[str]]] = None a__ : Optional[str] = None a__ : bool = True a__ : Optional[Literal["c", "python", "pyarrow"]] = None a__ : Dict[Union[int, str], Callable[[Any], Any]] = None a__ : Optional[list] = None a__ : Optional[list] = None a__ : bool = False a__ : Optional[Union[int, List[int]]] = None a__ : Optional[int] = None a__ : Optional[Union[str, List[str]]] = None a__ : bool = True a__ : bool = True a__ : bool = False a__ : bool = True a__ : Optional[str] = None a__ : str = "." a__ : Optional[str] = None a__ : str = '"' a__ : int = 0 a__ : Optional[str] = None a__ : Optional[str] = None a__ : Optional[str] = None a__ : Optional[str] = None a__ : bool = True a__ : bool = True a__ : int = 0 a__ : bool = True a__ : bool = False a__ : Optional[str] = None a__ : int = 1_0000 a__ : Optional[datasets.Features] = None a__ : Optional[str] = "strict" a__ : Literal["error", "warn", "skip"] = "error" a__ : Optional[str] = None def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: if self.delimiter is not None: UpperCAmelCase_= self.delimiter if self.column_names is not None: UpperCAmelCase_= self.column_names @property def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: UpperCAmelCase_= { """sep""": self.sep, """header""": self.header, """names""": self.names, """index_col""": self.index_col, """usecols""": self.usecols, """prefix""": self.prefix, """mangle_dupe_cols""": self.mangle_dupe_cols, """engine""": self.engine, """converters""": self.converters, """true_values""": self.true_values, """false_values""": self.false_values, """skipinitialspace""": self.skipinitialspace, """skiprows""": self.skiprows, """nrows""": self.nrows, """na_values""": self.na_values, """keep_default_na""": self.keep_default_na, """na_filter""": self.na_filter, """verbose""": self.verbose, """skip_blank_lines""": self.skip_blank_lines, """thousands""": self.thousands, """decimal""": self.decimal, """lineterminator""": self.lineterminator, """quotechar""": self.quotechar, """quoting""": self.quoting, """escapechar""": self.escapechar, """comment""": self.comment, """encoding""": self.encoding, """dialect""": self.dialect, """error_bad_lines""": self.error_bad_lines, """warn_bad_lines""": self.warn_bad_lines, """skipfooter""": self.skipfooter, """doublequote""": self.doublequote, """memory_map""": self.memory_map, """float_precision""": self.float_precision, """chunksize""": self.chunksize, """encoding_errors""": self.encoding_errors, """on_bad_lines""": self.on_bad_lines, """date_format""": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass 
them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , __UpperCAmelCase ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class lowercase ( datasets.ArrowBasedBuilder): """simple docstring""" a__ : int = CsvConfig def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: return datasets.DatasetInfo(features=self.config.features ) def _SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : Dict ) -> Optional[int]: if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) UpperCAmelCase_= dl_manager.download_and_extract(self.config.data_files ) if isinstance(__UpperCAmelCase , (str, list, tuple) ): UpperCAmelCase_= data_files if isinstance(__UpperCAmelCase , __UpperCAmelCase ): UpperCAmelCase_= [files] UpperCAmelCase_= [dl_manager.iter_files(__UpperCAmelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] UpperCAmelCase_= [] for split_name, files in data_files.items(): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): UpperCAmelCase_= [files] UpperCAmelCase_= [dl_manager.iter_files(__UpperCAmelCase ) for file in files] splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"""files""": files} ) ) return splits def _SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : pa.Table ) -> pa.Table: if self.config.features is not None: UpperCAmelCase_= self.config.features.arrow_schema if all(not require_storage_cast(__UpperCAmelCase ) for feature in self.config.features.values() ): # cheaper cast UpperCAmelCase_= pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=__UpperCAmelCase ) else: # more expensive cast; allows str <-> int/float or str to Audio for example UpperCAmelCase_= table_cast(__UpperCAmelCase , __UpperCAmelCase ) return pa_table def _SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : List[Any] ) -> List[str]: UpperCAmelCase_= self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str UpperCAmelCase_= ( { name: dtype.to_pandas_dtype() if not require_storage_cast(__UpperCAmelCase ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase ) ): UpperCAmelCase_= pd.read_csv(__UpperCAmelCase , iterator=__UpperCAmelCase , dtype=__UpperCAmelCase , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(__UpperCAmelCase ): UpperCAmelCase_= pa.Table.from_pandas(__UpperCAmelCase ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), 
self._cast_table(__UpperCAmelCase ) except ValueError as e: logger.error(F"""Failed to read file '{file}' with error {type(__UpperCAmelCase )}: {e}""" ) raise
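# The CSV counterpart: most CsvConfig fields above are plain pandas.read_csv
# keywords forwarded through load_dataset. A sketch (the file name and
# delimiter are illustrative):
import csv

from datasets import load_dataset

with open("points.csv", "w", newline="") as f:
    csv.writer(f, delimiter=";").writerows([["x", "y"], [1, 2], [3, 4]])

# sep / skiprows / column_names etc. map onto the CsvConfig fields above.
ds = load_dataset("csv", data_files="points.csv", sep=";")
print(ds["train"].column_names)  # ['x', 'y']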
277
1
"""simple docstring""" from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar __A = TypeVar("""KEY""") __A = TypeVar("""VAL""") @dataclass(frozen=a , slots=a ) class _lowerCAmelCase ( Generic[KEY, VAL] ): """simple docstring""" __magic_name__ :KEY __magic_name__ :VAL class _lowerCAmelCase ( _Item ): """simple docstring""" def __init__( self ): '''simple docstring''' super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __bool__( self ): '''simple docstring''' return False __A = _DeletedItem() class _lowerCAmelCase ( MutableMapping[KEY, VAL] ): """simple docstring""" def __init__( self , __UpperCAmelCase = 8 , __UpperCAmelCase = 0.75 ): '''simple docstring''' lowerCAmelCase__ :List[str] = initial_block_size lowerCAmelCase__ :list[_Item | None] = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 lowerCAmelCase__ :Tuple = capacity_factor lowerCAmelCase__ :str = 0 def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return hash(__UpperCAmelCase ) % len(self._buckets ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' return (ind + 1) % len(self._buckets ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Any = self._buckets[ind] if not stored: lowerCAmelCase__ :Dict = _Item(__UpperCAmelCase , __UpperCAmelCase ) self._len += 1 return True elif stored.key == key: lowerCAmelCase__ :Optional[Any] = _Item(__UpperCAmelCase , __UpperCAmelCase ) return True else: return False def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = len(self._buckets ) * self._capacity_factor return len(self ) >= int(__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' if len(self._buckets ) <= self._initial_block_size: return False lowerCAmelCase__ :Optional[Any] = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self._buckets lowerCAmelCase__ :Tuple = [None] * new_size lowerCAmelCase__ :List[Any] = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def snake_case ( self ): '''simple docstring''' self._resize(len(self._buckets ) * 2 ) def snake_case ( self ): '''simple docstring''' self._resize(len(self._buckets ) // 2 ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = self._get_bucket_index(__UpperCAmelCase ) for _ in range(len(self._buckets ) ): yield ind lowerCAmelCase__ :Tuple = self._get_next_ind(__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): if self._try_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): break def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if self._is_full(): self._size_up() self._add_item(__UpperCAmelCase , __UpperCAmelCase ) def __delitem__( self , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): lowerCAmelCase__ :int = self._buckets[ind] if item is None: raise KeyError(__UpperCAmelCase ) if item is _deleted: continue if item.key == key: lowerCAmelCase__ :List[str] = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): lowerCAmelCase__ :str = 
self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(__UpperCAmelCase ) def __len__( self ): '''simple docstring''' return self._len def __iter__( self ): '''simple docstring''' yield from (item.key for item in self._buckets if item) def __repr__( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = ' ,'.join( F"{item.key}: {item.val}" for item in self._buckets if item ) return F"HashMap({val_string})"
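A minimal usage sketch for the HashMap above (not part of the original file): it exercises insertion, lookup, deletion via tombstones, and enough inserts to trigger the automatic resize path. The keys and values are arbitrary demo data.

# Hypothetical demo of the open-addressing HashMap defined above.
if __name__ == "__main__":
    hm = HashMap(initial_block_size=8, capacity_factor=0.75)
    for i in range(10):  # enough insertions to trigger _size_up()
        hm[f"key{i}"] = i
    assert len(hm) == 10
    assert hm["key3"] == 3
    del hm["key3"]  # slot becomes a _deleted tombstone
    assert "key3" not in hm
    print(hm)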
293
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __A = { """configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""], """configuration_data2vec_text""": [ """DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecTextConfig""", """Data2VecTextOnnxConfig""", ], """configuration_data2vec_vision""": [ """DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecVisionConfig""", """Data2VecVisionOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ """DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecAudioForAudioFrameClassification""", """Data2VecAudioForCTC""", """Data2VecAudioForSequenceClassification""", """Data2VecAudioForXVector""", """Data2VecAudioModel""", """Data2VecAudioPreTrainedModel""", ] __A = [ """DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecTextForCausalLM""", """Data2VecTextForMaskedLM""", """Data2VecTextForMultipleChoice""", """Data2VecTextForQuestionAnswering""", """Data2VecTextForSequenceClassification""", """Data2VecTextForTokenClassification""", """Data2VecTextModel""", """Data2VecTextPreTrainedModel""", ] __A = [ """DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecVisionForImageClassification""", """Data2VecVisionForMaskedImageModeling""", """Data2VecVisionForSemanticSegmentation""", """Data2VecVisionModel""", """Data2VecVisionPreTrainedModel""", ] if is_tf_available(): __A = [ """TFData2VecVisionForImageClassification""", """TFData2VecVisionForSemanticSegmentation""", """TFData2VecVisionModel""", """TFData2VecVisionPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
293
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __UpperCamelCase = logging.get_logger(__name__) def lowercase (SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : List[str]=False , SCREAMING_SNAKE_CASE_ : int=False ) -> Tuple: SCREAMING_SNAKE_CASE = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'transformer.blocks.{i}.norm1.weight', F'vilt.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((F'transformer.blocks.{i}.norm1.bias', F'vilt.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append( (F'transformer.blocks.{i}.attn.proj.weight', F'vilt.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append( (F'transformer.blocks.{i}.attn.proj.bias', F'vilt.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((F'transformer.blocks.{i}.norm2.weight', F'vilt.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((F'transformer.blocks.{i}.norm2.bias', F'vilt.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append( (F'transformer.blocks.{i}.mlp.fc1.weight', F'vilt.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((F'transformer.blocks.{i}.mlp.fc1.bias', F'vilt.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((F'transformer.blocks.{i}.mlp.fc2.weight', F'vilt.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((F'transformer.blocks.{i}.mlp.fc2.bias', F'vilt.encoder.layer.{i}.output.dense.bias') ) # embeddings rename_keys.extend( [ # text embeddings ('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'), ( 'text_embeddings.position_embeddings.weight', 'vilt.embeddings.text_embeddings.position_embeddings.weight', ), ('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'), ( 'text_embeddings.token_type_embeddings.weight', 'vilt.embeddings.text_embeddings.token_type_embeddings.weight', ), ('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'), ('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'), # patch embeddings ('transformer.cls_token', 'vilt.embeddings.cls_token'), ('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'), ('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'), ('transformer.pos_embed', 'vilt.embeddings.position_embeddings'), # token type embeddings ('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'), ] ) # final layernorm + pooler rename_keys.extend( [ ('transformer.norm.weight', 'vilt.layernorm.weight'), ('transformer.norm.bias', 'vilt.layernorm.bias'), ('pooler.dense.weight', 'vilt.pooler.dense.weight'), ('pooler.dense.bias', 'vilt.pooler.dense.bias'), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ('vqa_classifier.0.weight', 'classifier.0.weight'), ('vqa_classifier.0.bias', 'classifier.0.bias'), ('vqa_classifier.1.weight', 'classifier.1.weight'), ('vqa_classifier.1.bias', 
'classifier.1.bias'), ('vqa_classifier.3.weight', 'classifier.3.weight'), ('vqa_classifier.3.bias', 'classifier.3.bias'), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ('nlvr2_classifier.0.weight', 'classifier.0.weight'), ('nlvr2_classifier.0.bias', 'classifier.0.bias'), ('nlvr2_classifier.1.weight', 'classifier.1.weight'), ('nlvr2_classifier.1.bias', 'classifier.1.bias'), ('nlvr2_classifier.3.weight', 'classifier.3.weight'), ('nlvr2_classifier.3.bias', 'classifier.3.bias'), ] ) else: pass return rename_keys def lowercase (SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int ) -> Optional[int]: for i in range(config.num_hidden_layers ): SCREAMING_SNAKE_CASE = 'vilt.' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) SCREAMING_SNAKE_CASE = state_dict.pop(F'transformer.blocks.{i}.attn.qkv.weight' ) SCREAMING_SNAKE_CASE = state_dict.pop(F'transformer.blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE = in_proj_weight[ : config.hidden_size, : ] SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size] SCREAMING_SNAKE_CASE = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] SCREAMING_SNAKE_CASE = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] SCREAMING_SNAKE_CASE = in_proj_weight[ -config.hidden_size :, : ] SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :] def lowercase (SCREAMING_SNAKE_CASE_ : List[str] ) -> Optional[Any]: SCREAMING_SNAKE_CASE = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowercase (SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict ) -> Union[str, Any]: SCREAMING_SNAKE_CASE = dct.pop(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = val @torch.no_grad() def lowercase (SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[int]: SCREAMING_SNAKE_CASE = ViltConfig(image_size=3_84 , patch_size=32 , tie_word_embeddings=SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False if "vqa" in checkpoint_url: SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = 31_29 SCREAMING_SNAKE_CASE = 'huggingface/label-files' SCREAMING_SNAKE_CASE = 'vqa2-id2label.json' SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='dataset' ) , 'r' ) ) SCREAMING_SNAKE_CASE = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = ViltForQuestionAnswering(SCREAMING_SNAKE_CASE_ ) elif "nlvr" in checkpoint_url: SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = 2 SCREAMING_SNAKE_CASE = {0: 'False', 1: 'True'} SCREAMING_SNAKE_CASE = {v: k for k, v in config.idalabel.items()} SCREAMING_SNAKE_CASE = 3 SCREAMING_SNAKE_CASE = ViltForImagesAndTextClassification(SCREAMING_SNAKE_CASE_ ) elif "irtr" in checkpoint_url: SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = ViltForImageAndTextRetrieval(SCREAMING_SNAKE_CASE_ ) elif "mlm_itm" in checkpoint_url: SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = ViltForMaskedLM(SCREAMING_SNAKE_CASE_ ) else: raise ValueError('Unknown model type' ) # load state_dict of original model, remove and rename some keys SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , 
map_location='cpu' )['state_dict'] SCREAMING_SNAKE_CASE = create_rename_keys(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if mlm_model or irtr_model: SCREAMING_SNAKE_CASE = ['itm_score.fc.weight', 'itm_score.fc.bias'] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # load state dict into HuggingFace model model.eval() if mlm_model: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # Define processor SCREAMING_SNAKE_CASE = ViltImageProcessor(size=3_84 ) SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained('bert-base-uncased' ) SCREAMING_SNAKE_CASE = ViltProcessor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Forward pass on example inputs (image + text) if nlvr_model: SCREAMING_SNAKE_CASE = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=SCREAMING_SNAKE_CASE_ ).raw ) SCREAMING_SNAKE_CASE = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=SCREAMING_SNAKE_CASE_ ).raw ) SCREAMING_SNAKE_CASE = ( 'The left image contains twice the number of dogs as the right image, and at least two dogs in total are' ' standing.' ) SCREAMING_SNAKE_CASE = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_tensors='pt' ) SCREAMING_SNAKE_CASE = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_tensors='pt' ) SCREAMING_SNAKE_CASE = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: SCREAMING_SNAKE_CASE = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=SCREAMING_SNAKE_CASE_ ).raw ) if mlm_model: SCREAMING_SNAKE_CASE = 'a bunch of [MASK] laying on a [MASK].' else: SCREAMING_SNAKE_CASE = 'How many cats are there?' 
SCREAMING_SNAKE_CASE = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_tensors='pt' ) SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ ) # Verify outputs if mlm_model: SCREAMING_SNAKE_CASE = torch.Size([1, 11, 3_05_22] ) SCREAMING_SNAKE_CASE = torch.tensor([-12.50_61, -12.51_23, -12.51_74] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) # verify masked token prediction equals "cats" SCREAMING_SNAKE_CASE = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: SCREAMING_SNAKE_CASE = torch.Size([1, 31_29] ) SCREAMING_SNAKE_CASE = torch.tensor([-15.94_95, -18.14_72, -10.30_41] ) assert torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) # verify vqa prediction equals "2" SCREAMING_SNAKE_CASE = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: SCREAMING_SNAKE_CASE = torch.Size([1, 2] ) SCREAMING_SNAKE_CASE = torch.tensor([-2.87_21, 2.12_91] ) assert torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) assert outputs.logits.shape == expected_shape Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) print(F'Saving model and processor to {pytorch_dump_folder_path}' ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) __UpperCamelCase = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
38
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''', } class lowerCAmelCase ( lowerCamelCase_ , lowerCamelCase_ ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = """resnet""" SCREAMING_SNAKE_CASE_ : Tuple = ["""basic""", """bottleneck"""] def __init__( self , lowerCAmelCase__=3 , lowerCAmelCase__=64 , lowerCAmelCase__=[256, 512, 1_024, 2_048] , lowerCAmelCase__=[3, 4, 6, 3] , lowerCAmelCase__="bottleneck" , lowerCAmelCase__="relu" , lowerCAmelCase__=False , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> Union[str, Any]: super().__init__(**lowerCAmelCase__ ) if layer_type not in self.layer_types: raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' ) SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = embedding_size SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = layer_type SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = downsample_in_first_stage SCREAMING_SNAKE_CASE = ['stem'] + [F'stage{idx}' for idx in range(1 , len(lowerCAmelCase__ ) + 1 )] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices( out_features=lowerCAmelCase__ , out_indices=lowerCAmelCase__ , stage_names=self.stage_names ) class lowerCAmelCase ( lowerCamelCase_ ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = version.parse("""1.11""" ) @property def __A ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def __A ( self ) -> float: return 1e-3
38
1
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement(A) = 1 - µA(x)
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), 1 - µB(x))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = µA(x) * µB(x)
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, µA(x) + µB(x)]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, µA(x) - µB(x)]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
262
"""simple docstring""" def __lowerCamelCase ( a_ : Union[str, Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE :List[str] = 1 __SCREAMING_SNAKE_CASE :Dict = 2 while i * i <= n: __SCREAMING_SNAKE_CASE :Tuple = 0 while n % i == 0: n //= i multiplicity += 1 n_divisors *= multiplicity + 1 i += 1 if n > 1: n_divisors *= 2 return n_divisors def __lowerCamelCase ( ) -> int: __SCREAMING_SNAKE_CASE :Dict = 1 __SCREAMING_SNAKE_CASE :Dict = 1 while True: i += 1 t_num += i if count_divisors(a_ ) > 5_00: break return t_num if __name__ == "__main__": print(solution())
191
0
class MaxFenwickTree:
    """
    Fenwick tree (binary indexed tree) supporting point updates and
    range maximum queries over a fixed-size array.
    """

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Set arr[index] = value and refresh every tree node covering index."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # This node covers only `index` itself.
                self.tree[index] = value
            else:
                # Recompute the node from the new value and the (still
                # consistent) maximum over [current_left_border, index).
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Return max(arr[left:right]); `right` is exclusive."""
        right -= 1  # because `right` is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
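A short usage sketch for the tree above (indices are 0-based and query's right bound is exclusive). The sample values are arbitrary, and the last assertions exercise the recomputation branch of update when a value is lowered.

t = MaxFenwickTree(5)
for i, v in enumerate([2, 9, 4, 7, 5]):
    t.update(i, v)
assert t.query(0, 5) == 9  # max of the whole array
assert t.query(2, 4) == 7  # max of arr[2:4]
t.update(1, 1)             # lower arr[1]; covering tree nodes are recomputed
assert t.query(0, 3) == 4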
361
import logging import os import threading import time try: import warnings except ImportError: _UpperCAmelCase : List[str] = None try: import msvcrt except ImportError: _UpperCAmelCase : Tuple = None try: import fcntl except ImportError: _UpperCAmelCase : Optional[Any] = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: _UpperCAmelCase : Tuple = OSError # Data # ------------------------------------------------ _UpperCAmelCase : Optional[int] = [ "Timeout", "BaseFileLock", "WindowsFileLock", "UnixFileLock", "SoftFileLock", "FileLock", ] _UpperCAmelCase : Optional[Any] = "3.0.12" _UpperCAmelCase : int = None def UpperCAmelCase__ ( ): global _logger lowercase :List[str] = _logger or logging.getLogger(__name__ ) return _logger class __lowerCAmelCase ( lowerCAmelCase): def __init__( self: int , _lowerCAmelCase: Dict ): lowercase :Any = lock_file return None def __str__( self: Dict ): lowercase :str = F"The file lock '{self.lock_file}' could not be acquired." return temp class __lowerCAmelCase : def __init__( self: Tuple , _lowerCAmelCase: Any ): lowercase :Optional[Any] = lock return None def __enter__( self: List[Any] ): return self.lock def __exit__( self: Dict , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Optional[int] ): self.lock.release() return None class __lowerCAmelCase : def __init__( self: Optional[Any] , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: Tuple=-1 , _lowerCAmelCase: int=None ): lowercase :Any = max_filename_length if max_filename_length is not None else 2_55 # Hash the filename if it's too long lowercase :int = self.hash_filename_if_too_long(_lowerCAmelCase , _lowerCAmelCase ) # The path to the lock file. lowercase :List[Any] = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. lowercase :Any = None # The default timeout value. lowercase :Any = timeout # We use this lock primarily for the lock counter. lowercase :Optional[int] = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. lowercase :Optional[int] = 0 return None @property def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ): return self._lock_file @property def SCREAMING_SNAKE_CASE ( self: Optional[Any] ): return self._timeout @timeout.setter def SCREAMING_SNAKE_CASE ( self: Tuple , _lowerCAmelCase: List[str] ): lowercase :Tuple = float(_lowerCAmelCase ) return None def SCREAMING_SNAKE_CASE ( self: int ): raise NotImplementedError() def SCREAMING_SNAKE_CASE ( self: int ): raise NotImplementedError() @property def SCREAMING_SNAKE_CASE ( self: Optional[Any] ): return self._lock_file_fd is not None def SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _lowerCAmelCase: List[Any]=None , _lowerCAmelCase: Union[str, Any]=0.05 ): # Use the default timeout, if no timeout is provided. if timeout is None: lowercase :List[str] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 lowercase :Any = id(self ) lowercase :Optional[int] = self._lock_file lowercase :Optional[Any] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F"Attempting to acquire lock {lock_id} on {lock_filename}" ) self._acquire() if self.is_locked: logger().debug(F"Lock {lock_id} acquired on {lock_filename}" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F"Timeout on acquiring lock {lock_id} on {lock_filename}" ) raise Timeout(self._lock_file ) else: logger().debug( F"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." ) time.sleep(_lowerCAmelCase ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: lowercase :Union[str, Any] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def SCREAMING_SNAKE_CASE ( self: Tuple , _lowerCAmelCase: Tuple=False ): with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: lowercase :Union[str, Any] = id(self ) lowercase :str = self._lock_file logger().debug(F"Attempting to release lock {lock_id} on {lock_filename}" ) self._release() lowercase :List[str] = 0 logger().debug(F"Lock {lock_id} released on {lock_filename}" ) return None def __enter__( self: Tuple ): self.acquire() return self def __exit__( self: Union[str, Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[str] , _lowerCAmelCase: Dict ): self.release() return None def __del__( self: Optional[Any] ): self.release(force=_lowerCAmelCase ) return None def SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _lowerCAmelCase: str , _lowerCAmelCase: int ): lowercase :Union[str, Any] = os.path.basename(_lowerCAmelCase ) if len(_lowerCAmelCase ) > max_length and max_length > 0: lowercase :Dict = os.path.dirname(_lowerCAmelCase ) lowercase :Any = str(hash(_lowerCAmelCase ) ) lowercase :Union[str, Any] = filename[: max_length - len(_lowerCAmelCase ) - 8] + "..." + hashed_filename + ".lock" return os.path.join(_lowerCAmelCase , _lowerCAmelCase ) else: return path class __lowerCAmelCase ( lowerCAmelCase): def __init__( self: int , _lowerCAmelCase: int , _lowerCAmelCase: Optional[Any]=-1 , _lowerCAmelCase: List[Any]=None ): from .file_utils import relative_to_absolute_path super().__init__(_lowerCAmelCase , timeout=_lowerCAmelCase , max_filename_length=_lowerCAmelCase ) lowercase :Optional[int] = "\\\\?\\" + relative_to_absolute_path(self.lock_file ) def SCREAMING_SNAKE_CASE ( self: Any ): lowercase :int = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: lowercase :Tuple = os.open(self._lock_file , _lowerCAmelCase ) except OSError: pass else: try: msvcrt.locking(_lowerCAmelCase , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(_lowerCAmelCase ) else: lowercase :Any = fd return None def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ): lowercase :Any = self._lock_file_fd lowercase :Tuple = None msvcrt.locking(_lowerCAmelCase , msvcrt.LK_UNLCK , 1 ) os.close(_lowerCAmelCase ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __lowerCAmelCase ( lowerCAmelCase): def __init__( self: str , _lowerCAmelCase: Tuple , _lowerCAmelCase: Dict=-1 , _lowerCAmelCase: Tuple=None ): lowercase :List[str] = os.statvfs(os.path.dirname(_lowerCAmelCase ) ).f_namemax super().__init__(_lowerCAmelCase , timeout=_lowerCAmelCase , max_filename_length=_lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: int ): lowercase :Any = os.O_RDWR | os.O_CREAT | os.O_TRUNC lowercase :Optional[int] = os.open(self._lock_file , _lowerCAmelCase ) try: fcntl.flock(_lowerCAmelCase , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(_lowerCAmelCase ) else: lowercase :Optional[Any] = fd return None def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ): # Do not remove the lockfile: # # https://github.com/benediktschmitt/py-filelock/issues/31 # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition lowercase :Dict = self._lock_file_fd lowercase :Union[str, Any] = None fcntl.flock(_lowerCAmelCase , fcntl.LOCK_UN ) os.close(_lowerCAmelCase ) return None class __lowerCAmelCase ( lowerCAmelCase): def SCREAMING_SNAKE_CASE ( self: List[Any] ): lowercase :str = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: lowercase :List[Any] = os.open(self._lock_file , _lowerCAmelCase ) except OSError: pass else: lowercase :int = fd return None def SCREAMING_SNAKE_CASE ( self: Optional[Any] ): os.close(self._lock_file_fd ) lowercase :int = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None _UpperCAmelCase : Tuple = None if msvcrt: _UpperCAmelCase : str = WindowsFileLock elif fcntl: _UpperCAmelCase : List[Any] = UnixFileLock else: _UpperCAmelCase : Optional[int] = SoftFileLock if warnings is not None: warnings.warn("only soft file lock is available")
158
0
"""Depth-first topological sort of a directed acyclic graph."""

edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform a depth-first topological sort over the graph in `edges`."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited, add current to sort
    sort.append(current)
    # if all vertices haven't been visited, select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
162
'''simple docstring''' import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params __lowerCamelCase = getLogger(__name__) __lowerCamelCase = '''cuda''' if torch.cuda.is_available() else '''cpu''' def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ = 8, UpperCAmelCase__ = DEFAULT_DEVICE, UpperCAmelCase__=False, UpperCAmelCase__="summarization", UpperCAmelCase__=None, **UpperCAmelCase__, ) -> Dict: A_ = Path(UpperCAmelCase__ ).open("""w""", encoding="""utf-8""" ) A_ = str(UpperCAmelCase__ ) A_ = AutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase__ ).to(UpperCAmelCase__ ) if fpaa: A_ = model.half() A_ = AutoTokenizer.from_pretrained(UpperCAmelCase__ ) logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. A_ = time.time() # update config with task specific params use_task_specific_params(UpperCAmelCase__, UpperCAmelCase__ ) if prefix is None: A_ = prefix or getattr(model.config, """prefix""", """""" ) or """""" for examples_chunk in tqdm(list(chunks(UpperCAmelCase__, UpperCAmelCase__ ) ) ): A_ = [prefix + text for text in examples_chunk] A_ = tokenizer(UpperCAmelCase__, return_tensors="""pt""", truncation=UpperCAmelCase__, padding="""longest""" ).to(UpperCAmelCase__ ) A_ = model.generate( input_ids=batch.input_ids, attention_mask=batch.attention_mask, **UpperCAmelCase__, ) A_ = tokenizer.batch_decode(UpperCAmelCase__, skip_special_tokens=UpperCAmelCase__, clean_up_tokenization_spaces=UpperCAmelCase__ ) for hypothesis in dec: fout.write(hypothesis + """\n""" ) fout.flush() fout.close() A_ = int(time.time() - start_time ) # seconds A_ = len(UpperCAmelCase__ ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4 )} def UpperCAmelCase__ ( ) -> Optional[int]: return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" ) def UpperCAmelCase__ ( UpperCAmelCase__=True ) -> Any: A_ = argparse.ArgumentParser() parser.add_argument("""model_name""", type=UpperCAmelCase__, help="""like facebook/bart-large-cnn,t5-base, etc.""" ) parser.add_argument("""input_path""", type=UpperCAmelCase__, help="""like cnn_dm/test.source""" ) parser.add_argument("""save_path""", type=UpperCAmelCase__, help="""where to save summaries""" ) parser.add_argument("""--reference_path""", type=UpperCAmelCase__, required=UpperCAmelCase__, help="""like cnn_dm/test.target""" ) parser.add_argument("""--score_path""", type=UpperCAmelCase__, required=UpperCAmelCase__, default="""metrics.json""", help="""where to save metrics""" ) parser.add_argument("""--device""", type=UpperCAmelCase__, required=UpperCAmelCase__, default=UpperCAmelCase__, help="""cuda, cuda:1, cpu etc.""" ) parser.add_argument( """--prefix""", type=UpperCAmelCase__, required=UpperCAmelCase__, default=UpperCAmelCase__, help="""will be added to the begininng of src examples""" ) parser.add_argument("""--task""", type=UpperCAmelCase__, default="""summarization""", help="""used for task_specific_params + metrics""" ) parser.add_argument("""--bs""", type=UpperCAmelCase__, default=8, required=UpperCAmelCase__, help="""batch size""" ) parser.add_argument( """--n_obs""", type=UpperCAmelCase__, default=-1, required=UpperCAmelCase__, 
help="""How many observations. Defaults to all.""" ) parser.add_argument("""--fp16""", action="""store_true""" ) parser.add_argument("""--dump-args""", action="""store_true""", help="""print the custom hparams with the results""" ) parser.add_argument( """--info""", nargs="""?""", type=UpperCAmelCase__, const=datetime_now(), help=( """use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g.""" """ lang=en-ru. If no value is passed, the current datetime string will be used.""" ), ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate A_ , A_ = parser.parse_known_args() A_ = parse_numeric_n_bool_cl_kwargs(UpperCAmelCase__ ) if parsed_args and verbose: print(F'''parsed the following generate kwargs: {parsed_args}''' ) A_ = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: A_ = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=UpperCAmelCase__ ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(F'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError("""Can't mix --fp16 and --device cpu""" ) A_ = generate_summaries_or_translations( UpperCAmelCase__, args.save_path, args.model_name, batch_size=args.bs, device=args.device, fpaa=args.fpaa, task=args.task, prefix=args.prefix, **UpperCAmelCase__, ) if args.reference_path is None: return {} # Compute scores A_ = calculate_bleu if """translation""" in args.task else calculate_rouge A_ = [x.rstrip() for x in open(args.save_path ).readlines()] A_ = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(UpperCAmelCase__ )] A_ = score_fn(UpperCAmelCase__, UpperCAmelCase__ ) scores.update(UpperCAmelCase__ ) if args.dump_args: scores.update(UpperCAmelCase__ ) if args.info: A_ = args.info if verbose: print(UpperCAmelCase__ ) if args.score_path is not None: json.dump(UpperCAmelCase__, open(args.score_path, """w""" ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
162
1
'''simple docstring''' import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def a ( __a , __a ) -> List[Any]: '''simple docstring''' UpperCamelCase__ :List[str] = checkpoint UpperCamelCase__ :Any = {} UpperCamelCase__ :Tuple = vae_state_dict['''encoder.conv_in.weight'''] UpperCamelCase__ :List[str] = vae_state_dict['''encoder.conv_in.bias'''] UpperCamelCase__ :Dict = vae_state_dict['''encoder.conv_out.weight'''] UpperCamelCase__ :Optional[int] = vae_state_dict['''encoder.conv_out.bias'''] UpperCamelCase__ :Dict = vae_state_dict['''encoder.norm_out.weight'''] UpperCamelCase__ :Optional[Any] = vae_state_dict['''encoder.norm_out.bias'''] UpperCamelCase__ :Union[str, Any] = vae_state_dict['''decoder.conv_in.weight'''] UpperCamelCase__ :str = vae_state_dict['''decoder.conv_in.bias'''] UpperCamelCase__ :Optional[int] = vae_state_dict['''decoder.conv_out.weight'''] UpperCamelCase__ :int = vae_state_dict['''decoder.conv_out.bias'''] UpperCamelCase__ :List[str] = vae_state_dict['''decoder.norm_out.weight'''] UpperCamelCase__ :Union[str, Any] = vae_state_dict['''decoder.norm_out.bias'''] UpperCamelCase__ :Tuple = vae_state_dict['''quant_conv.weight'''] UpperCamelCase__ :List[str] = vae_state_dict['''quant_conv.bias'''] UpperCamelCase__ :List[Any] = vae_state_dict['''post_quant_conv.weight'''] UpperCamelCase__ :Tuple = vae_state_dict['''post_quant_conv.bias'''] # Retrieves the keys for the encoder down blocks only UpperCamelCase__ :Tuple = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} ) UpperCamelCase__ :Tuple = { layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(__a ) } # Retrieves the keys for the decoder up blocks only UpperCamelCase__ :List[Any] = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} ) UpperCamelCase__ :Tuple = { layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(__a ) } for i in range(__a ): UpperCamelCase__ :Union[str, Any] = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key] if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict: UpperCamelCase__ :Tuple = vae_state_dict.pop( f'''encoder.down.{i}.downsample.conv.weight''' ) UpperCamelCase__ :List[str] = vae_state_dict.pop( f'''encoder.down.{i}.downsample.conv.bias''' ) UpperCamelCase__ :Union[str, Any] = renew_vae_resnet_paths(__a ) UpperCamelCase__ :Union[str, Any] = {'''old''': f'''down.{i}.block''', '''new''': f'''down_blocks.{i}.resnets'''} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) UpperCamelCase__ :Union[str, Any] = [key for key in vae_state_dict if '''encoder.mid.block''' in key] UpperCamelCase__ :Optional[Any] = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCamelCase__ :int = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key] UpperCamelCase__ :Any = renew_vae_resnet_paths(__a ) UpperCamelCase__ :Optional[int] = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) UpperCamelCase__ :int = [key for key in vae_state_dict if 
'''encoder.mid.attn''' in key] UpperCamelCase__ :Union[str, Any] = renew_vae_attention_paths(__a ) UpperCamelCase__ :Any = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) for i in range(__a ): UpperCamelCase__ :Union[str, Any] = num_up_blocks - 1 - i UpperCamelCase__ :int = [ key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key ] if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict: UpperCamelCase__ :str = vae_state_dict[ f'''decoder.up.{block_id}.upsample.conv.weight''' ] UpperCamelCase__ :Dict = vae_state_dict[ f'''decoder.up.{block_id}.upsample.conv.bias''' ] UpperCamelCase__ :Union[str, Any] = renew_vae_resnet_paths(__a ) UpperCamelCase__ :Dict = {'''old''': f'''up.{block_id}.block''', '''new''': f'''up_blocks.{i}.resnets'''} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) UpperCamelCase__ :Tuple = [key for key in vae_state_dict if '''decoder.mid.block''' in key] UpperCamelCase__ :Tuple = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCamelCase__ :List[str] = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key] UpperCamelCase__ :str = renew_vae_resnet_paths(__a ) UpperCamelCase__ :str = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) UpperCamelCase__ :Optional[Any] = [key for key in vae_state_dict if '''decoder.mid.attn''' in key] UpperCamelCase__ :Optional[int] = renew_vae_attention_paths(__a ) UpperCamelCase__ :Union[str, Any] = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) return new_checkpoint def a ( __a , __a , ) -> str: '''simple docstring''' UpperCamelCase__ :Optional[Any] = requests.get( ''' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' ) UpperCamelCase__ :Optional[Any] = io.BytesIO(r.content ) UpperCamelCase__ :Dict = OmegaConf.load(__a ) UpperCamelCase__ :List[Any] = 512 UpperCamelCase__ :List[str] = '''cuda''' if torch.cuda.is_available() else '''cpu''' if checkpoint_path.endswith('''safetensors''' ): from safetensors import safe_open UpperCamelCase__ :Union[str, Any] = {} with safe_open(__a , framework='''pt''' , device='''cpu''' ) as f: for key in f.keys(): UpperCamelCase__ :List[Any] = f.get_tensor(__a ) else: UpperCamelCase__ :Any = torch.load(__a , map_location=__a )['''state_dict'''] # Convert the VAE model. UpperCamelCase__ :Optional[Any] = create_vae_diffusers_config(__a , image_size=__a ) UpperCamelCase__ :List[str] = custom_convert_ldm_vae_checkpoint(__a , __a ) UpperCamelCase__ :Union[str, Any] = AutoencoderKL(**__a ) vae.load_state_dict(__a ) vae.save_pretrained(__a ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') __snake_case = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
219
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer __snake_case = logging.get_logger(__name__) __snake_case = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} __snake_case = { '''vocab_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''', '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-german-cased''': ( '''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json''' ), '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json''' ), }, } __snake_case = { '''distilbert-base-uncased''': 512, '''distilbert-base-uncased-distilled-squad''': 512, '''distilbert-base-cased''': 512, '''distilbert-base-cased-distilled-squad''': 512, '''distilbert-base-german-cased''': 512, '''distilbert-base-multilingual-cased''': 512, } __snake_case = { '''distilbert-base-uncased''': {'''do_lower_case''': True}, '''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True}, '''distilbert-base-cased''': {'''do_lower_case''': False}, '''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False}, '''distilbert-base-german-cased''': {'''do_lower_case''': False}, '''distilbert-base-multilingual-cased''': {'''do_lower_case''': False}, } class lowercase ( A__ ): """simple docstring""" _a = VOCAB_FILES_NAMES _a = PRETRAINED_VOCAB_FILES_MAP _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a = PRETRAINED_INIT_CONFIGURATION _a = ['input_ids', 'attention_mask'] _a = DistilBertTokenizer def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_="[UNK]" , UpperCamelCase_="[SEP]" , UpperCamelCase_="[PAD]" , UpperCamelCase_="[CLS]" , UpperCamelCase_="[MASK]" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ): '''simple docstring''' super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , 
strip_accents=UpperCamelCase_ , **UpperCamelCase_ , ) UpperCamelCase__ :int = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , UpperCamelCase_ ) != do_lower_case or normalizer_state.get('''strip_accents''' , UpperCamelCase_ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase_ ) != tokenize_chinese_chars ): UpperCamelCase__ :int = getattr(UpperCamelCase_ , normalizer_state.pop('''type''' ) ) UpperCamelCase__ :Optional[Any] = do_lower_case UpperCamelCase__ :Optional[Any] = strip_accents UpperCamelCase__ :List[Any] = tokenize_chinese_chars UpperCamelCase__ :Any = normalizer_class(**UpperCamelCase_ ) UpperCamelCase__ :int = do_lower_case def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None ): '''simple docstring''' UpperCamelCase__ :Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ): '''simple docstring''' UpperCamelCase__ :List[str] = [self.sep_token_id] UpperCamelCase__ :List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ): '''simple docstring''' UpperCamelCase__ :str = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ )
219
1
from random import shuffle import tensorflow as tf from numpy import array def A ( lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' UpperCamelCase = int(lowercase ) assert noofclusters < len(lowercase ) # Find out the dimensionality UpperCamelCase = len(vectors[0] ) # Will help select random centroids from among the available vectors UpperCamelCase = list(range(len(lowercase ) ) ) shuffle(lowercase ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. UpperCamelCase = tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION UpperCamelCase = tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points UpperCamelCase = [ tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase ) ] ##These nodes will assign the centroid Variables the appropriate ##values UpperCamelCase = tf.placeholder('float64' , [dim] ) UpperCamelCase = [] for centroid in centroids: cent_assigns.append(tf.assign(lowercase , lowercase ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) UpperCamelCase = [tf.Variable(0 ) for i in range(len(lowercase ) )] ##These nodes will assign an assignment Variable the appropriate ##value UpperCamelCase = tf.placeholder('int32' ) UpperCamelCase = [] for assignment in assignments: cluster_assigns.append(tf.assign(lowercase , lowercase ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input UpperCamelCase = tf.placeholder('float' , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors UpperCamelCase = tf.reduce_mean(lowercase , 0 ) ##Node for computing Euclidean distances # Placeholders for input UpperCamelCase = tf.placeholder('float' , [dim] ) UpperCamelCase = tf.placeholder('float' , [dim] ) UpperCamelCase = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase , lowercase ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input UpperCamelCase = tf.placeholder('float' , [noofclusters] ) UpperCamelCase = tf.argmin(lowercase , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. UpperCamelCase = tf.initialize_all_variables() # Initialize all variables sess.run(lowercase ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. UpperCamelCase = 100 for _ in range(lowercase ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. # Iterate over each vector for vector_n in range(len(lowercase ) ): UpperCamelCase = vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. 
UpperCamelCase = [ sess.run(lowercase , feed_dict={va: vect, va: sess.run(lowercase )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input UpperCamelCase = sess.run( lowercase , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(lowercase ): # Collect all the vectors assigned to this cluster UpperCamelCase = [ vectors[i] for i in range(len(lowercase ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location UpperCamelCase = sess.run( lowercase , feed_dict={mean_input: array(lowercase )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments UpperCamelCase = sess.run(lowercase ) UpperCamelCase = sess.run(lowercase ) return centroids, assignments
222
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_multiple_files(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_dedup, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_dedup), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
222
1
import unittest

import numpy as np

from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.distilbert.modeling_flax_distilbert import (
        FlaxDistilBertForMaskedLM,
        FlaxDistilBertForMultipleChoice,
        FlaxDistilBertForQuestionAnswering,
        FlaxDistilBertForSequenceClassification,
        FlaxDistilBertForTokenClassification,
        FlaxDistilBertModel,
    )


class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])

        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = np.array(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
351
def prefix_function(input_string: str) -> list:
    """Compute the Knuth-Morris-Pratt prefix function: for each position i,
    the length of the longest proper prefix of input_string[: i + 1] that is
    also a suffix of it."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    """Return the length of the longest prefix of the string that also occurs
    as a suffix somewhere inside it."""
    return max(prefix_function(input_string))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
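A quick usage sketch for the helpers above; the expected values follow directly from the definition of the prefix function:

# For "aabcdaabc", prefix_result[i] is the length of the longest proper
# prefix of s[: i + 1] that is also a suffix of it.
s = "aabcdaabc"
result = prefix_function(s)
assert result == [0, 1, 0, 0, 0, 1, 2, 3, 4]
assert longest_prefix(s) == 4  # "aabc" is both a prefix and a suffix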
274
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
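The `_LazyModule` pattern above defers the heavy torch/TF imports until a symbol is actually touched. A rough sketch of the behaviour (simplified; the real `_LazyModule` in transformers does considerably more bookkeeping):

# Minimal sketch of lazy attribute loading, assuming a module-level
# _import_structure mapping like the one above.
import importlib


class LazyModuleSketch:
    def __init__(self, package_name, import_structure):
        self._package_name = package_name
        # invert {submodule: [symbol, ...]} into {symbol: submodule}
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        # only now is the heavy submodule actually imported
        submodule = self._symbol_to_module[symbol]
        module = importlib.import_module(f".{submodule}", self._package_name)
        return getattr(module, symbol)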
309
from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE dy/dx = ode_func(x, y) on [x0, x_end] with the explicit
    (forward) Euler method, returning the array of y values."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
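A quick numeric check of the solver above on dy/dx = y with y(0) = 1, whose exact solution is e^x, so the endpoint should approach e as the step shrinks:

import numpy as np

ys = explicit_euler(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.001, x_end=1.0)
# forward Euler is first order: with h = 1e-3 the endpoint error is ~1.4e-3
assert abs(ys[-1] - np.e) < 2e-3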
309
1
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: __lowerCamelCase : Optional[int] = None __lowerCamelCase : Tuple = logging.get_logger(__name__) __lowerCamelCase : Dict = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} __lowerCamelCase : Dict = { """vocab_file""": { """facebook/mbart-large-en-ro""": ( """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model""" ), """facebook/mbart-large-cc25""": ( """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""", """facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""", }, } __lowerCamelCase : Any = { """facebook/mbart-large-en-ro""": 1024, """facebook/mbart-large-cc25""": 1024, } # fmt: off __lowerCamelCase : Any = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""] class A__ ( __snake_case ): _UpperCAmelCase :Optional[int] = VOCAB_FILES_NAMES _UpperCAmelCase :int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase :List[Any] = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase :Optional[Any] = ['input_ids', 'attention_mask'] _UpperCAmelCase :Union[str, Any] = MBartTokenizer _UpperCAmelCase :List[int] = [] _UpperCAmelCase :List[int] = [] def __init__( self , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=None , A_=None , A_=None , **A_ , ): '''simple docstring''' UpperCamelCase : int = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token super().__init__( vocab_file=A_ , tokenizer_file=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , src_lang=A_ , tgt_lang=A_ , additional_special_tokens=A_ , **A_ , ) UpperCamelCase : List[Any] = vocab_file UpperCamelCase : Any = False if not self.vocab_file else True UpperCamelCase : str = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} ) UpperCamelCase : Union[str, Any] = { lang_code: self.convert_tokens_to_ids(A_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } UpperCamelCase : int = src_lang if src_lang is not None else "en_XX" UpperCamelCase : List[str] = self.convert_tokens_to_ids(self._src_lang ) UpperCamelCase : Dict = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __UpperCamelCase( self ): '''simple docstring''' return self._src_lang @src_lang.setter def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __UpperCamelCase( self , A_ , A_ = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __UpperCamelCase( self , A_ , A_ = None ): '''simple docstring''' UpperCamelCase : List[Any] = [self.sep_token_id] UpperCamelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __UpperCamelCase( self , A_ , A_ , A_ , A_ , **A_ ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) UpperCamelCase : List[str] = src_lang UpperCamelCase : Optional[int] = self(A_ , add_special_tokens=A_ , return_tensors=A_ , **A_ ) UpperCamelCase : Optional[int] = self.convert_tokens_to_ids(A_ ) UpperCamelCase : Optional[int] = tgt_lang_id return inputs def __UpperCamelCase( self , A_ , A_ = "en_XX" , A_ = None , A_ = "ro_RO" , **A_ , ): '''simple docstring''' UpperCamelCase : int = src_lang UpperCamelCase : int = tgt_lang return super().prepare_seqaseq_batch(A_ , A_ , **A_ ) def __UpperCamelCase( self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def __UpperCamelCase( self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Dict = self.convert_tokens_to_ids(A_ ) UpperCamelCase : Optional[int] = [] UpperCamelCase : Tuple = [self.eos_token_id, self.cur_lang_code] UpperCamelCase : Dict = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCamelCase : Tuple = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCamelCase : str = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.convert_tokens_to_ids(A_ ) UpperCamelCase : Any = [] UpperCamelCase : int = [self.eos_token_id, self.cur_lang_code] UpperCamelCase : Any = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCamelCase : int = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCamelCase : int = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , 
self.prefix_tokens + self.suffix_tokens ) ) , ) def __UpperCamelCase( self , A_ , A_ = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(A_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" ) return UpperCamelCase : Optional[Any] = os.path.join( A_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ): copyfile(self.vocab_file , A_ ) return (out_vocab_file,)
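The `set_src_lang_special_tokens` logic above leaves the prefix empty and appends the language code after `</s>`. A hedged usage sketch, using the public transformers name `MBartTokenizerFast` for the class (the name is masked in the file above):

# Hypothetical round trip: source-language inputs end with [</s>, lang_code].
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro")
tok.src_lang = "en_XX"  # triggers set_src_lang_special_tokens

ids = tok("UN Chief Says There Is No Military Solution in Syria").input_ids
assert ids[-2] == tok.eos_token_id
assert ids[-1] == tok.convert_tokens_to_ids("en_XX")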
140
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
140
1
import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
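The heart of the script is the low-rank update W <- W + alpha * (up @ down). A toy numeric sketch of that merge, independent of any checkpoint (the tensors here are made up for illustration; alpha mirrors the script's 0.75 default):

import torch

weight = torch.zeros(4, 4)          # stands in for a frozen model weight
weight_up = torch.randn(4, 2)       # stands in for a ...lora_up... tensor
weight_down = torch.randn(2, 4)     # stands in for a ...lora_down... tensor
alpha = 0.75

weight += alpha * torch.mm(weight_up, weight_down)
assert torch.linalg.matrix_rank(weight) <= 2  # the update is at most rank 2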
200
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
339
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( 
self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) def _a ( *_snake_case , **_snake_case ): """simple docstring""" requires_backends(_snake_case , ["""torch"""] ) def _a ( *_snake_case , **_snake_case ): """simple docstring""" requires_backends(_snake_case , ["""torch"""] ) def _a ( *_snake_case , **_snake_case ): """simple docstring""" requires_backends(_snake_case , ["""torch"""] ) def _a ( *_snake_case , **_snake_case ): """simple docstring""" requires_backends(_snake_case , ["""torch"""] ) def _a ( *_snake_case , **_snake_case ): """simple docstring""" requires_backends(_snake_case , ["""torch"""] ) def _a ( *_snake_case , **_snake_case ): """simple docstring""" requires_backends(_snake_case , ["""torch"""] ) def _a ( *_snake_case , **_snake_case ): """simple docstring""" requires_backends(_snake_case , ["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) 
@classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls 
,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) 
@classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) class lowerCamelCase__ ( metaclass=snake_case_ ): SCREAMING_SNAKE_CASE = ['''torch'''] def __init__( self ,*A ,**A ): requires_backends(self ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls ,["""torch"""] ) @classmethod def _UpperCamelCase ( cls ,*A ,**A ): requires_backends(cls 
,["""torch"""] )
356
"""simple docstring""" import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( """The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion""" ) _UpperCamelCase = None _UpperCamelCase = { """7B""": 11008, """13B""": 13824, """30B""": 17920, """65B""": 22016, """70B""": 28672, } _UpperCamelCase = { """7B""": 1, """7Bf""": 1, """13B""": 2, """13Bf""": 2, """30B""": 4, """65B""": 8, """70B""": 8, """70Bf""": 8, } def _a ( _snake_case , _snake_case=1 , _snake_case=256 ): """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def _a ( _snake_case ): """simple docstring""" with open(_snake_case , """r""" ) as f: return json.load(_snake_case ) def _a ( _snake_case , _snake_case ): """simple docstring""" with open(_snake_case , """w""" ) as f: json.dump(_snake_case , _snake_case ) def _a ( _snake_case , _snake_case , _snake_case , _snake_case=True ): """simple docstring""" os.makedirs(_snake_case , exist_ok=_snake_case ) UpperCAmelCase = os.path.join(_snake_case , """tmp""" ) os.makedirs(_snake_case , exist_ok=_snake_case ) UpperCAmelCase = read_json(os.path.join(_snake_case , """params.json""" ) ) UpperCAmelCase = NUM_SHARDS[model_size] UpperCAmelCase = params["""n_layers"""] UpperCAmelCase = params["""n_heads"""] UpperCAmelCase = n_heads // num_shards UpperCAmelCase = params["""dim"""] UpperCAmelCase = dim // n_heads UpperCAmelCase = 10000.0 UpperCAmelCase = 1.0 / (base ** (torch.arange(0 , _snake_case , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: UpperCAmelCase = params["""n_kv_heads"""] # for GQA / MQA UpperCAmelCase = n_heads_per_shard // num_key_value_heads UpperCAmelCase = dim // num_key_value_heads else: # compatibility with other checkpoints UpperCAmelCase = n_heads UpperCAmelCase = n_heads_per_shard UpperCAmelCase = dim # permute for sliced rotary def permute(_snake_case , _snake_case=n_heads , _snake_case=dim , _snake_case=dim ): return w.view(_snake_case , dima // n_heads // 2 , 2 , _snake_case ).transpose(1 , 2 ).reshape(_snake_case , _snake_case ) print(F'''Fetching all parameters from the checkpoint at {input_base_path}.''' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
UpperCAmelCase = torch.load(os.path.join(_snake_case , """consolidated.00.pth""" ) , map_location="""cpu""" ) else: # Sharded UpperCAmelCase = [ torch.load(os.path.join(_snake_case , F'''consolidated.{i:02d}.pth''' ) , map_location="""cpu""" ) for i in range(_snake_case ) ] UpperCAmelCase = 0 UpperCAmelCase = {"""weight_map""": {}} for layer_i in range(_snake_case ): UpperCAmelCase = F'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded UpperCAmelCase = { F'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute( loaded[F'''layers.{layer_i}.attention.wq.weight'''] ), F'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute( loaded[F'''layers.{layer_i}.attention.wk.weight'''] ), F'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[F'''layers.{layer_i}.attention.wv.weight'''], F'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[F'''layers.{layer_i}.attention.wo.weight'''], F'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w1.weight'''], F'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w2.weight'''], F'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w3.weight'''], F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[F'''layers.{layer_i}.attention_norm.weight'''], F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[F'''layers.{layer_i}.ffn_norm.weight'''], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. 
UpperCAmelCase = { F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][ F'''layers.{layer_i}.attention_norm.weight''' ].clone(), F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][ F'''layers.{layer_i}.ffn_norm.weight''' ].clone(), } UpperCAmelCase = permute( torch.cat( [ loaded[i][F'''layers.{layer_i}.attention.wq.weight'''].view(_snake_case , _snake_case , _snake_case ) for i in range(_snake_case ) ] , dim=0 , ).reshape(_snake_case , _snake_case ) ) UpperCAmelCase = permute( torch.cat( [ loaded[i][F'''layers.{layer_i}.attention.wk.weight'''].view( _snake_case , _snake_case , _snake_case ) for i in range(_snake_case ) ] , dim=0 , ).reshape(_snake_case , _snake_case ) , _snake_case , _snake_case , _snake_case , ) UpperCAmelCase = torch.cat( [ loaded[i][F'''layers.{layer_i}.attention.wv.weight'''].view( _snake_case , _snake_case , _snake_case ) for i in range(_snake_case ) ] , dim=0 , ).reshape(_snake_case , _snake_case ) UpperCAmelCase = torch.cat( [loaded[i][F'''layers.{layer_i}.attention.wo.weight'''] for i in range(_snake_case )] , dim=1 ) UpperCAmelCase = torch.cat( [loaded[i][F'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(_snake_case )] , dim=0 ) UpperCAmelCase = torch.cat( [loaded[i][F'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(_snake_case )] , dim=1 ) UpperCAmelCase = torch.cat( [loaded[i][F'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(_snake_case )] , dim=0 ) UpperCAmelCase = inv_freq for k, v in state_dict.items(): UpperCAmelCase = filename param_count += v.numel() torch.save(_snake_case , os.path.join(_snake_case , _snake_case ) ) UpperCAmelCase = F'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded UpperCAmelCase = { """model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""], """model.norm.weight""": loaded["""norm.weight"""], """lm_head.weight""": loaded["""output.weight"""], } else: UpperCAmelCase = { """model.norm.weight""": loaded[0]["""norm.weight"""], """model.embed_tokens.weight""": torch.cat( [loaded[i]["""tok_embeddings.weight"""] for i in range(_snake_case )] , dim=1 ), """lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(_snake_case )] , dim=0 ), } for k, v in state_dict.items(): UpperCAmelCase = filename param_count += v.numel() torch.save(_snake_case , os.path.join(_snake_case , _snake_case ) ) # Write configs UpperCAmelCase = {"""total_size""": param_count * 2} write_json(_snake_case , os.path.join(_snake_case , """pytorch_model.bin.index.json""" ) ) UpperCAmelCase = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1 UpperCAmelCase = params["""multiple_of"""] if """multiple_of""" in params else 256 UpperCAmelCase = LlamaConfig( hidden_size=_snake_case , intermediate_size=compute_intermediate_size(_snake_case , _snake_case , _snake_case ) , num_attention_heads=params["""n_heads"""] , num_hidden_layers=params["""n_layers"""] , rms_norm_eps=params["""norm_eps"""] , num_key_value_heads=_snake_case , ) config.save_pretrained(_snake_case ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print("""Loading the checkpoint in a Llama model.""" ) UpperCAmelCase = LlamaForCausalLM.from_pretrained(_snake_case , torch_dtype=torch.floataa , low_cpu_mem_usage=_snake_case ) # Avoid saving this as part of the config. 
del model.config._name_or_path print("""Saving in the Transformers format.""" ) model.save_pretrained(_snake_case , safe_serialization=_snake_case ) shutil.rmtree(_snake_case ) def _a ( _snake_case , _snake_case ): """simple docstring""" UpperCAmelCase = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(F'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' ) UpperCAmelCase = tokenizer_class(_snake_case ) tokenizer.save_pretrained(_snake_case ) def _a ( ): """simple docstring""" UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( """--input_dir""" , help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" , ) parser.add_argument( """--model_size""" , choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] , ) parser.add_argument( """--output_dir""" , help="""Location to write HF model and tokenizer""" , ) parser.add_argument("""--safe_serialization""" , type=_snake_case , help="""Whether or not to save using `safetensors`.""" ) UpperCAmelCase = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) UpperCAmelCase = os.path.join(args.input_dir , """tokenizer.model""" ) write_tokenizer(args.output_dir , _snake_case ) if __name__ == "__main__": main()
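A quick sanity check of the FFN sizing used above: rounding 8n/3 up to a multiple of 256 reproduces the values in the intermediate-size table at the top of the script. The function below is a hypothetical standalone copy of the script's helper, written out just to verify the arithmetic:

def intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    # mirrors the compute_intermediate_size logic in the script above
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)

assert intermediate_size(4096) == 11008  # 7B, dim 4096
assert intermediate_size(5120) == 13824  # 13B, dim 5120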
234
0
'''simple docstring''' import warnings from pathlib import Path from typing import List, Tuple, Union import fire from torch import nn from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel from transformers.utils import logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] ) assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ), f"""{len(SCREAMING_SNAKE_CASE__ )} != {len(SCREAMING_SNAKE_CASE__ )}""" dest_layers.load_state_dict(layers_to_copy.state_dict() ) UpperCAmelCase_ : Dict = { # maps num layers in teacher -> num_layers in student -> which teacher layers to copy. # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP 12: { 1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher 2: [0, 6], 3: [0, 6, 11], 4: [0, 4, 8, 11], 6: [0, 2, 4, 7, 9, 11], 9: [0, 1, 2, 4, 5, 7, 9, 10, 11], 12: list(range(12)), }, 16: { # maps num layers in student -> which teacher layers to copy 1: [0], 2: [0, 15], 3: [0, 8, 15], 4: [0, 5, 10, 15], 6: [0, 3, 6, 9, 12, 15], 8: [0, 2, 4, 6, 8, 10, 12, 15], 9: [0, 1, 3, 5, 7, 9, 11, 13, 15], 12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15], 16: list(range(16)), }, 6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))}, } UpperCAmelCase_ : List[str] = { # maps num layers in student -> which teacher layers to copy. 6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]}, 12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]}, 16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]}, } def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" try: _SCREAMING_SNAKE_CASE : Optional[Any] = LAYERS_TO_COPY[n_teacher][n_student] return val except KeyError: if n_student != n_teacher: warnings.warn( f"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first""" f""" {n_student}""" ) return list(range(SCREAMING_SNAKE_CASE__ ) ) def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" if n_student > n_teacher: raise ValueError(f"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" ) elif n_teacher == n_student: return list(range(SCREAMING_SNAKE_CASE__ ) ) elif n_student == 1: return [n_teacher - 1] else: return LAYERS_TO_SUPERVISE[n_teacher][n_student] def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = "student" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ , ): """simple docstring""" _SCREAMING_SNAKE_CASE : str = """encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.""" assert (e is not None) or (d is not None), _msg if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ).save_pretrained(SCREAMING_SNAKE_CASE__ ) # purely for convenience _SCREAMING_SNAKE_CASE : List[str] = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE__ ).eval() else: assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), f"""teacher must be a model or string got type {type(SCREAMING_SNAKE_CASE__ )}""" _SCREAMING_SNAKE_CASE : Tuple = 
teacher.config.to_diff_dict() try: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = teacher.config.encoder_layers, teacher.config.decoder_layers if e is None: _SCREAMING_SNAKE_CASE : str = teacher_e if d is None: _SCREAMING_SNAKE_CASE : Tuple = teacher_d init_kwargs.update({"""encoder_layers""": e, """decoder_layers""": d} ) except AttributeError: # T5 if hasattr(teacher.config , """num_encoder_layers""" ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers else: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = teacher.config.num_layers, teacher.config.num_decoder_layers if e is None: _SCREAMING_SNAKE_CASE : Optional[Any] = teacher_e if d is None: _SCREAMING_SNAKE_CASE : Optional[int] = teacher_d if hasattr(teacher.config , """num_encoder_layers""" ): init_kwargs.update({"""num_encoder_layers""": e, """num_decoder_layers""": d} ) else: init_kwargs.update({"""num_layers""": e, """num_decoder_layers""": d} ) # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs init_kwargs.update(SCREAMING_SNAKE_CASE__ ) # Copy weights _SCREAMING_SNAKE_CASE : List[str] = teacher.config_class(**SCREAMING_SNAKE_CASE__ ) _SCREAMING_SNAKE_CASE : str = AutoModelForSeqaSeqLM.from_config(SCREAMING_SNAKE_CASE__ ) # Start by copying the full teacher state dict this will copy the first N teacher layers to the student. _SCREAMING_SNAKE_CASE : int = student.load_state_dict(teacher.state_dict() , strict=SCREAMING_SNAKE_CASE__ ) assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys. if copy_first_teacher_layers: # Our copying is done. We just log and save _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = list(range(SCREAMING_SNAKE_CASE__ ) ), list(range(SCREAMING_SNAKE_CASE__ ) ) logger.info( f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to""" f""" {save_path}""" ) student.save_pretrained(SCREAMING_SNAKE_CASE__ ) return student, e_layers_to_copy, d_layers_to_copy # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer. if e_layers_to_copy is None: _SCREAMING_SNAKE_CASE : List[int] = pick_layers_to_copy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if d_layers_to_copy is None: _SCREAMING_SNAKE_CASE : List[int] = pick_layers_to_copy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) try: if hasattr( SCREAMING_SNAKE_CASE__ , """prophetnet""" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , SCREAMING_SNAKE_CASE__ ) copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , SCREAMING_SNAKE_CASE__ ) else: copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , SCREAMING_SNAKE_CASE__ ) copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , SCREAMING_SNAKE_CASE__ ) except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block copy_layers(teacher.encoder.block , student.encoder.block , SCREAMING_SNAKE_CASE__ ) copy_layers(teacher.decoder.block , student.decoder.block , SCREAMING_SNAKE_CASE__ ) logger.info( f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. 
Saving them to {save_path}""" ) _SCREAMING_SNAKE_CASE : Dict = { """teacher_type""": teacher.config.model_type, """copied_encoder_layers""": e_layers_to_copy, """copied_decoder_layers""": d_layers_to_copy, } student.save_pretrained(SCREAMING_SNAKE_CASE__ ) # Save information about copying for easier reproducibility return student, e_layers_to_copy, d_layers_to_copy if __name__ == "__main__": fire.Fire(create_student_by_copying_alternating_layers)
200
'''simple docstring''' import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration UpperCAmelCase_ : Union[str, Any] = pytest.mark.integration UpperCAmelCase_ : List[Any] = {'comet'} UpperCAmelCase_ : int = importlib.util.find_spec('fairseq') is not None UpperCAmelCase_ : Optional[Any] = {'code_eval'} UpperCAmelCase_ : Optional[int] = os.name == 'nt' UpperCAmelCase_ : Dict = {'bertscore', 'frugalscore', 'perplexity'} UpperCAmelCase_ : Dict = importlib.util.find_spec('transformers') is not None def snake_case_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" @wraps(SCREAMING_SNAKE_CASE__ ) def wrapper(self , SCREAMING_SNAKE_CASE__ ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest("""\"test requires Fairseq\"""" ) else: test_case(self , SCREAMING_SNAKE_CASE__ ) return wrapper def snake_case_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" @wraps(SCREAMING_SNAKE_CASE__ ) def wrapper(self , SCREAMING_SNAKE_CASE__ ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest("""\"test requires transformers\"""" ) else: test_case(self , SCREAMING_SNAKE_CASE__ ) return wrapper def snake_case_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" @wraps(SCREAMING_SNAKE_CASE__ ) def wrapper(self , SCREAMING_SNAKE_CASE__ ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest("""\"test not supported on Windows\"""" ) else: test_case(self , SCREAMING_SNAKE_CASE__ ) return wrapper def snake_case_ ( ): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( _snake_case , _snake_case , _snake_case ) @local class lowercase__ ( parameterized.TestCase ): '''simple docstring''' A_ : Optional[int] = {} A_ : Union[str, Any] = None @pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" ) @pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""" ) def UpperCAmelCase_ ( self , __snake_case ): _SCREAMING_SNAKE_CASE : str = """[...]""" _SCREAMING_SNAKE_CASE : Any = importlib.import_module( datasets.load.metric_module_factory(os.path.join("""metrics""" , __snake_case ) ).module_path ) _SCREAMING_SNAKE_CASE : Optional[int] = datasets.load.import_main_class(metric_module.__name__ , dataset=__snake_case ) # check parameters _SCREAMING_SNAKE_CASE : Tuple = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(__snake_case , metric_module.__name__ ): with self.use_local_metrics(): try: _SCREAMING_SNAKE_CASE : int = doctest.testmod(__snake_case , verbose=__snake_case , raise_on_error=__snake_case ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def UpperCAmelCase_ ( self , __snake_case ): _SCREAMING_SNAKE_CASE : List[Any] = """[...]""" _SCREAMING_SNAKE_CASE 
: Optional[Any] = importlib.import_module( datasets.load.metric_module_factory(os.path.join("""metrics""" , __snake_case ) ).module_path ) # run doctest with self.use_local_metrics(): _SCREAMING_SNAKE_CASE : List[str] = doctest.testmod(__snake_case , verbose=__snake_case , raise_on_error=__snake_case ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def UpperCAmelCase_ ( self , __snake_case , __snake_case ): if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](__snake_case ): yield else: yield @contextmanager def UpperCAmelCase_ ( self ): def load_local_metric(__snake_case , *__snake_case , **__snake_case ): return load_metric(os.path.join("""metrics""" , __snake_case ) , *__snake_case , **__snake_case ) with patch("""datasets.load_metric""" ) as mock_load_metric: _SCREAMING_SNAKE_CASE : Union[str, Any] = load_local_metric yield @classmethod def UpperCAmelCase_ ( cls , __snake_case ): def wrapper(__snake_case ): _SCREAMING_SNAKE_CASE : Any = contextmanager(__snake_case ) _SCREAMING_SNAKE_CASE : int = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher("""bleurt""" ) def snake_case_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags class lowercase__ ( _snake_case ): '''simple docstring''' def UpperCAmelCase_ ( self , __snake_case ): assert len(input_dict["""input_ids"""] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor: _SCREAMING_SNAKE_CASE : Any = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher("""bertscore""" ) def snake_case_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" import torch def bert_cos_score_idf(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): return torch.tensor([[1.0, 1.0, 1.0]] * len(SCREAMING_SNAKE_CASE__ ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch("""bert_score.scorer.get_model""" ), patch( """bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf: _SCREAMING_SNAKE_CASE : Any = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher("""comet""" ) def snake_case_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def load_from_checkpoint(SCREAMING_SNAKE_CASE__ ): class lowercase__ : '''simple docstring''' def UpperCAmelCase_ ( self , __snake_case , *__snake_case , **__snake_case ): assert len(__snake_case ) == 2 _SCREAMING_SNAKE_CASE : Dict = [0.19, 0.92] return scores, sum(__snake_case ) / len(__snake_case ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch("""comet.download_model""" ) as mock_download_model: _SCREAMING_SNAKE_CASE : Any = None with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint: _SCREAMING_SNAKE_CASE : List[str] = load_from_checkpoint yield def snake_case_ ( ): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = load_metric(os.path.join("""metrics""" , """seqeval""" ) ) _SCREAMING_SNAKE_CASE : List[str] = """ERROR""" _SCREAMING_SNAKE_CASE : Tuple = 
f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with pytest.raises(SCREAMING_SNAKE_CASE__ , match=re.escape(SCREAMING_SNAKE_CASE__ ) ): metric.compute(predictions=[] , references=[] , scheme=SCREAMING_SNAKE_CASE__ )
200
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        """simple docstring"""
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        """simple docstring"""
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
139
__UpperCAmelCase = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ __UpperCAmelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}] __UpperCAmelCase = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
139
1
import warnings
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
38
import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer UpperCAmelCase_ : Optional[Any] = ['''bert-base-uncased''', '''bert-base-cased'''] UpperCAmelCase_ : List[str] = '''hf-internal-testing/tiny-bert-tf-only''' if is_tf_available(): class _SCREAMING_SNAKE_CASE ( tf.keras.Model ): def __init__( self : List[str] , __lowerCamelCase : Union[str, Any] ): super().__init__() UpperCamelCase :Any = tokenizer UpperCamelCase :List[str] = AutoConfig.from_pretrained(__lowerCamelCase ) UpperCamelCase :List[str] = TFAutoModel.from_config(__lowerCamelCase ) def _A ( self : Tuple , __lowerCamelCase : str ): UpperCamelCase :str = self.tokenizer(__lowerCamelCase ) UpperCamelCase :Any = self.bert(**__lowerCamelCase ) return out["pooler_output"] @require_tf @require_tensorflow_text class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def _A ( self : Dict ): super().setUp() UpperCamelCase :int = [ BertTokenizer.from_pretrained(__lowerCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false UpperCamelCase :Any = [TFBertTokenizer.from_pretrained(__lowerCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(__lowerCamelCase , use_fast_bert_tokenizer=__lowerCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers ) == len(self.tf_tokenizers ) UpperCamelCase :Any = [ """This is a straightforward English test sentence.""", """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""", """Now we're going to add some Chinese: 一 二 三 一二三""", """And some much more rare Chinese: 齉 堃 齉堃""", """Je vais aussi écrire en français pour tester les accents""", """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""", ] UpperCamelCase :Union[str, Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def _A ( self : Optional[int] ): for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in (self.test_sentences, self.paired_sentences): UpperCamelCase :Any = tokenizer(__lowerCamelCase , return_tensors="""tf""" , padding="""longest""" ) UpperCamelCase :str = tf_tokenizer(__lowerCamelCase ) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) ) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) ) @slow def _A ( self : Dict ): for tf_tokenizer in self.tf_tokenizers: UpperCamelCase :str = tf_tokenizer(self.paired_sentences ) UpperCamelCase :Any = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) ) @slow def _A ( self : List[str] ): for tf_tokenizer in self.tf_tokenizers: UpperCamelCase :List[Any] = tf.function(__lowerCamelCase ) for test_inputs in (self.test_sentences, self.paired_sentences): UpperCamelCase :Any = tf.constant(__lowerCamelCase ) UpperCamelCase :List[str] = compiled_tokenizer(__lowerCamelCase ) UpperCamelCase 
:Optional[Any] = tf_tokenizer(__lowerCamelCase ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def _A ( self : Tuple ): for tf_tokenizer in self.tf_tokenizers: UpperCamelCase :List[str] = ModelToSave(tokenizer=__lowerCamelCase ) UpperCamelCase :Union[str, Any] = tf.convert_to_tensor(self.test_sentences ) UpperCamelCase :Union[str, Any] = model(__lowerCamelCase ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: UpperCamelCase :List[str] = Path(__lowerCamelCase ) / """saved.model""" model.save(__lowerCamelCase ) UpperCamelCase :List[Any] = tf.keras.models.load_model(__lowerCamelCase ) UpperCamelCase :Dict = loaded_model(__lowerCamelCase ) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
38
1
"""simple docstring""" import random def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] ): _UpperCAmelCase : Tuple = a[left_index] _UpperCAmelCase : List[Any] = left_index + 1 for j in range(left_index + 1 , lowerCAmelCase__ ): if a[j] < pivot: _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = a[i], a[j] i += 1 _UpperCAmelCase , _UpperCAmelCase : Tuple = a[i - 1], a[left_index] return i - 1 def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ): if left < right: _UpperCAmelCase : Optional[Any] = random.randint(lowerCAmelCase__ , right - 1 ) _UpperCAmelCase , _UpperCAmelCase : Tuple = ( a[left], a[pivot], ) # switches the pivot with the left most bound _UpperCAmelCase : List[Any] = partition(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) quick_sort_random( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # recursive quicksort to the left of the pivot point quick_sort_random( lowerCAmelCase__ , pivot_index + 1 , lowerCAmelCase__ ) # recursive quicksort to the right of the pivot point def lowerCamelCase_ (): _UpperCAmelCase : List[Any] = input('''Enter numbers separated by a comma:\n''' ).strip() _UpperCAmelCase : Tuple = [int(lowerCAmelCase__ ) for item in user_input.split(''',''' )] quick_sort_random(lowerCAmelCase__ , 0 , len(lowerCAmelCase__ ) ) print(lowerCAmelCase__ ) if __name__ == "__main__": main()
365
"""simple docstring""" from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class _UpperCAmelCase : '''simple docstring''' a__ =42 a__ =None # Automatically constructed a__ ="dict" a__ =None a__ =field(default='''Translation''' ,init=a ,repr=a ) def __call__( self ) -> List[Any]: return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def __lowerCAmelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value return {k: Value('''string''' ) for k in sorted(self.languages )} @dataclass class _UpperCAmelCase : '''simple docstring''' a__ =None a__ =None a__ =None # Automatically constructed a__ ="dict" a__ =None a__ =field(default='''TranslationVariableLanguages''' ,init=a ,repr=a ) def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : int = sorted(set(self.languages ) ) if self.languages else None _UpperCAmelCase : List[str] = len(self.languages ) if self.languages else None def __call__( self ) -> str: return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} ) def __lowerCAmelCase ( self , A ) -> List[Any]: _UpperCAmelCase : List[str] = set(self.languages ) if self.languages and set(A ) - lang_set: raise ValueError( f'Some languages in example ({", ".join(sorted(set(A ) - lang_set ) )}) are not in valid set ({", ".join(A )}).' ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. _UpperCAmelCase : Dict = [] for lang, text in translation_dict.items(): if isinstance(A , A ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = zip(*sorted(A ) ) return {"language": languages, "translation": translations} def __lowerCAmelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Sequence, Value return { "language": Sequence(Value('''string''' ) ), "translation": Sequence(Value('''string''' ) ), }
68
0
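As a quick sanity check for the randomized quicksort sample above, a minimal usage sketch (function names recovered from the sample's own call sites; the input list is arbitrary):

# The sort is in place; random pivot selection keeps the expected runtime O(n log n).
data = [9, 3, 7, 1, 8, 2]
quick_sort_random(data, 0, len(data))
assert data == [1, 2, 3, 7, 8, 9]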
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list):
    if not elements_list:
        raise Exception("The Elements List is empty")
    head = Node(elements_list[0])
    current = head
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node):
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
87
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str=False ): '''simple docstring''' _lowerCAmelCase = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("module.cls_token", "vit.embeddings.cls_token"), ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("module.pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("module.norm.weight", "layernorm.weight"), ("module.norm.bias", "layernorm.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _lowerCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def __a(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str]=False ): '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: _lowerCAmelCase = "" else: _lowerCAmelCase = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _lowerCAmelCase = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' ) _lowerCAmelCase = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _lowerCAmelCase = in_proj_weight[ : config.hidden_size, : ] _lowerCAmelCase = in_proj_bias[: config.hidden_size] _lowerCAmelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _lowerCAmelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _lowerCAmelCase = in_proj_weight[ -config.hidden_size :, : ] _lowerCAmelCase = in_proj_bias[-config.hidden_size :] def __a(SCREAMING_SNAKE_CASE_ : str ): '''simple docstring''' _lowerCAmelCase = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def __a(SCREAMING_SNAKE_CASE_ : int ): '''simple docstring''' _lowerCAmelCase = [ "module.fc.fc1.weight", "module.fc.fc1.bias", "module.fc.bn1.weight", "module.fc.bn1.bias", "module.fc.bn1.running_mean", "module.fc.bn1.running_var", "module.fc.bn1.num_batches_tracked", "module.fc.fc2.weight", "module.fc.fc2.bias", "module.fc.bn2.weight", "module.fc.bn2.bias", "module.fc.bn2.running_mean", "module.fc.bn2.running_var", "module.fc.bn2.num_batches_tracked", "module.fc.fc3.weight", "module.fc.fc3.bias", ] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def __a(SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str ): '''simple docstring''' _lowerCAmelCase = dct.pop(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = val def __a(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple ): '''simple docstring''' _lowerCAmelCase = ViTMSNConfig() _lowerCAmelCase = 1000 _lowerCAmelCase = "datasets/huggingface/label-files" _lowerCAmelCase = "imagenet-1k-id2label.json" _lowerCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , "r" ) ) _lowerCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} _lowerCAmelCase = idalabel _lowerCAmelCase = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: _lowerCAmelCase = 384 _lowerCAmelCase = 1536 _lowerCAmelCase = 6 elif "l16" in checkpoint_url: _lowerCAmelCase = 1024 _lowerCAmelCase = 4096 _lowerCAmelCase = 24 _lowerCAmelCase = 16 _lowerCAmelCase = 0.1 elif "b4" in checkpoint_url: _lowerCAmelCase = 4 elif "l7" in checkpoint_url: _lowerCAmelCase = 7 _lowerCAmelCase = 1024 _lowerCAmelCase = 4096 _lowerCAmelCase = 24 _lowerCAmelCase = 16 _lowerCAmelCase = 0.1 _lowerCAmelCase = ViTMSNModel(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="cpu" )["target_encoder"] _lowerCAmelCase = ViTImageProcessor(size=config.image_size ) remove_projection_head(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_ ) model.load_state_dict(SCREAMING_SNAKE_CASE_ ) model.eval() _lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg" _lowerCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) _lowerCAmelCase = ViTImageProcessor( size=config.image_size , 
image_mean=SCREAMING_SNAKE_CASE_ , image_std=SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt" ) # forward pass torch.manual_seed(2 ) _lowerCAmelCase = model(**SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: _lowerCAmelCase = torch.tensor([[-1.0915, -1.4876, -1.1809]] ) elif "b16" in checkpoint_url: _lowerCAmelCase = torch.tensor([[14.2889, -18.9045, 11.7281]] ) elif "l16" in checkpoint_url: _lowerCAmelCase = torch.tensor([[41.5028, -22.8681, 45.6475]] ) elif "b4" in checkpoint_url: _lowerCAmelCase = torch.tensor([[-4.3868, 5.2932, -0.4137]] ) else: _lowerCAmelCase = torch.tensor([[-0.1792, -0.6465, 2.4263]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar", type=str, help="URL of the checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) _SCREAMING_SNAKE_CASE = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
158
0
import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def lowerCAmelCase_ (lowerCAmelCase__: int , lowerCAmelCase__: List[Any] ): """simple docstring""" UpperCAmelCase_: Optional[Any] = old_name if "patch_embed" in old_name: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: int = old_name.split(""".""" ) if layer == "0": UpperCAmelCase_: Optional[int] = old_name.replace("""0""" , """convolution1""" ) elif layer == "1": UpperCAmelCase_: List[Any] = old_name.replace("""1""" , """batchnorm_before""" ) elif layer == "3": UpperCAmelCase_: int = old_name.replace("""3""" , """convolution2""" ) else: UpperCAmelCase_: int = old_name.replace("""4""" , """batchnorm_after""" ) if "network" in old_name and re.search(r"""\d\.\d""" , lowerCAmelCase__ ): UpperCAmelCase_: int = r"""\b\d{2}\b""" if bool(re.search(lowerCAmelCase__ , lowerCAmelCase__ ) ): UpperCAmelCase_: Optional[int] = re.search(r"""\d\.\d\d.""" , lowerCAmelCase__ ).group() else: UpperCAmelCase_: str = re.search(r"""\d\.\d.""" , lowerCAmelCase__ ).group() if int(match[0] ) < 6: UpperCAmelCase_: Optional[int] = old_name.replace(lowerCAmelCase__ , """""" ) UpperCAmelCase_: Optional[int] = trimmed_name.replace("""network""" , match[0] + """.meta4D_layers.blocks.""" + match[2:-1] ) UpperCAmelCase_: Tuple = """intermediate_stages.""" + trimmed_name else: UpperCAmelCase_: Union[str, Any] = old_name.replace(lowerCAmelCase__ , """""" ) if int(match[2] ) < num_meta4D_last_stage: UpperCAmelCase_: Optional[int] = trimmed_name.replace("""network""" , """meta4D_layers.blocks.""" + match[2] ) else: UpperCAmelCase_: str = str(int(match[2] ) - num_meta4D_last_stage ) UpperCAmelCase_: Any = trimmed_name.replace("""network""" , """meta3D_layers.blocks.""" + layer_index ) if "norm1" in old_name: UpperCAmelCase_: Optional[int] = trimmed_name.replace("""norm1""" , """layernorm1""" ) elif "norm2" in old_name: UpperCAmelCase_: Optional[Any] = trimmed_name.replace("""norm2""" , """layernorm2""" ) elif "fc1" in old_name: UpperCAmelCase_: Dict = trimmed_name.replace("""fc1""" , """linear_in""" ) elif "fc2" in old_name: UpperCAmelCase_: Any = trimmed_name.replace("""fc2""" , """linear_out""" ) UpperCAmelCase_: str = """last_stage.""" + trimmed_name elif "network" in old_name and re.search(r""".\d.""" , lowerCAmelCase__ ): UpperCAmelCase_: Tuple = old_name.replace("""network""" , """intermediate_stages""" ) if "fc" in new_name: UpperCAmelCase_: List[Any] = new_name.replace("""fc""" , """convolution""" ) elif ("norm1" in new_name) and ("layernorm1" not in new_name): UpperCAmelCase_: List[str] = new_name.replace("""norm1""" , """batchnorm_before""" ) elif ("norm2" in new_name) and ("layernorm2" not in new_name): UpperCAmelCase_: Union[str, Any] = new_name.replace("""norm2""" , """batchnorm_after""" ) if "proj" in new_name: UpperCAmelCase_: List[str] = new_name.replace("""proj""" , """projection""" ) if "dist_head" in new_name: UpperCAmelCase_: Optional[int] = new_name.replace("""dist_head""" , """distillation_classifier""" ) elif "head" in new_name: UpperCAmelCase_: Tuple = new_name.replace("""head""" , """classifier""" ) elif "patch_embed" in new_name: UpperCAmelCase_: Optional[int] = """efficientformer.""" 
+ new_name elif new_name == "norm.weight" or new_name == "norm.bias": UpperCAmelCase_: Any = new_name.replace("""norm""" , """layernorm""" ) UpperCAmelCase_: str = """efficientformer.""" + new_name else: UpperCAmelCase_: Optional[Any] = """efficientformer.encoder.""" + new_name return new_name def lowerCAmelCase_ (lowerCAmelCase__: Any , lowerCAmelCase__: Dict ): """simple docstring""" for key in checkpoint.copy().keys(): UpperCAmelCase_: Tuple = checkpoint.pop(lowerCAmelCase__ ) UpperCAmelCase_: Tuple = val return checkpoint def lowerCAmelCase_ (): """simple docstring""" UpperCAmelCase_: Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase_: Union[str, Any] = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ) return image def lowerCAmelCase_ (lowerCAmelCase__: Path , lowerCAmelCase__: Path , lowerCAmelCase__: Path , lowerCAmelCase__: bool ): """simple docstring""" UpperCAmelCase_: int = torch.load(lowerCAmelCase__ , map_location="""cpu""" )["""model"""] UpperCAmelCase_: int = EfficientFormerConfig.from_json_file(lowerCAmelCase__ ) UpperCAmelCase_: Any = EfficientFormerForImageClassificationWithTeacher(lowerCAmelCase__ ) UpperCAmelCase_: Dict = """_""".join(checkpoint_path.split("""/""" )[-1].split(""".""" )[0].split("""_""" )[:-1] ) UpperCAmelCase_: List[str] = config.depths[-1] - config.num_metaad_blocks + 1 UpperCAmelCase_: Any = convert_torch_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ ) model.load_state_dict(lowerCAmelCase__ ) model.eval() UpperCAmelCase_: Tuple = { """bilinear""": PILImageResampling.BILINEAR, """bicubic""": PILImageResampling.BICUBIC, """nearest""": PILImageResampling.NEAREST, } # prepare image UpperCAmelCase_: Optional[Any] = prepare_img() UpperCAmelCase_: List[Any] = 2_5_6 UpperCAmelCase_: Any = 2_2_4 UpperCAmelCase_: Dict = EfficientFormerImageProcessor( size={"""shortest_edge""": image_size} , crop_size={"""height""": crop_size, """width""": crop_size} , resample=pillow_resamplings["""bicubic"""] , ) UpperCAmelCase_: Union[str, Any] = processor(images=lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values # original processing pipeline UpperCAmelCase_: Tuple = Compose( [ Resize(lowerCAmelCase__ , interpolation=pillow_resamplings["""bicubic"""] ), CenterCrop(lowerCAmelCase__ ), ToTensor(), Normalize(lowerCAmelCase__ , lowerCAmelCase__ ), ] ) UpperCAmelCase_: Dict = image_transforms(lowerCAmelCase__ ).unsqueeze(0 ) assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase_: List[str] = model(lowerCAmelCase__ ) UpperCAmelCase_: Union[str, Any] = outputs.logits UpperCAmelCase_: Optional[Any] = (1, 1_0_0_0) if "l1" in model_name: UpperCAmelCase_: int = torch.Tensor( [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] ) assert torch.allclose(logits[0, :1_0] , lowerCAmelCase__ , atol=1e-3 ) assert logits.shape == expected_shape elif "l3" in model_name: UpperCAmelCase_: Union[str, Any] = torch.Tensor( [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] ) assert torch.allclose(logits[0, :1_0] , lowerCAmelCase__ , atol=1e-3 ) assert logits.shape == expected_shape elif "l7" in model_name: UpperCAmelCase_: Tuple = torch.Tensor( [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] ) assert logits.shape == expected_shape else: raise ValueError( F'Unknown model checkpoint: {checkpoint_path}. 
Supported version of efficientformer are l1, l3 and l7' ) # Save Checkpoints Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ ) model.save_pretrained(lowerCAmelCase__ ) print(F'Checkpoint successfuly converted. Model saved at {pytorch_dump_path}' ) processor.save_pretrained(lowerCAmelCase__ ) print(F'Processor successfuly saved at {pytorch_dump_path}' ) if push_to_hub: print("""Pushing model to the hub...""" ) model.push_to_hub( repo_id=F'Bearnardd/{pytorch_dump_path}' , commit_message="""Add model""" , use_temp_dir=lowerCAmelCase__ , ) processor.push_to_hub( repo_id=F'Bearnardd/{pytorch_dump_path}' , commit_message="""Add image processor""" , use_temp_dir=lowerCAmelCase__ , ) if __name__ == "__main__": a : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--pytorch_model_path', default=None, type=str, required=True, help='Path to EfficientFormer pytorch checkpoint.', ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The json file for EfficientFormer model config.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') parser.add_argument( '--no-push_to_hub', dest='push_to_hub', action='store_false', help='Do not push model and image processor to the hub', ) parser.set_defaults(push_to_hub=True) a : int = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
82
from __future__ import annotations

# Function names below are reconstructed from the formulas:
# 1 / sum(1/R_i) is the parallel equivalent, sum(R_i) the series equivalent.


def resistor_parallel(resistors: list[float]) -> float:
    """simple docstring"""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """simple docstring"""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
82
1
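A small worked example for the resistor helpers above, assuming the reconstructed names resistor_parallel and resistor_series: two 4 Ω resistors give 1 / (1/4 + 1/4) = 2 Ω in parallel and 4 + 4 = 8 Ω in series.

assert resistor_parallel([4.0, 4.0]) == 2.0  # 1 / (1/4 + 1/4)
assert resistor_series([4.0, 4.0]) == 8.0    # 4 + 4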
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """simple docstring"""

    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
149
from ..utils import DummyObject, requires_backends class _a ( metaclass=UpperCamelCase__): """simple docstring""" UpperCamelCase__ = ["""flax""", """transformers"""] def __init__( self: Optional[int] , *__lowerCamelCase: Union[str, Any] , **__lowerCamelCase: List[str] ): '''simple docstring''' requires_backends(self , ["flax", "transformers"] ) @classmethod def UpperCAmelCase_ ( cls: Optional[int] , *__lowerCamelCase: str , **__lowerCamelCase: Any ): '''simple docstring''' requires_backends(cls , ["flax", "transformers"] ) @classmethod def UpperCAmelCase_ ( cls: Any , *__lowerCamelCase: List[str] , **__lowerCamelCase: Any ): '''simple docstring''' requires_backends(cls , ["flax", "transformers"] ) class _a ( metaclass=UpperCamelCase__): """simple docstring""" UpperCamelCase__ = ["""flax""", """transformers"""] def __init__( self: List[str] , *__lowerCamelCase: str , **__lowerCamelCase: int ): '''simple docstring''' requires_backends(self , ["flax", "transformers"] ) @classmethod def UpperCAmelCase_ ( cls: Any , *__lowerCamelCase: Union[str, Any] , **__lowerCamelCase: Optional[int] ): '''simple docstring''' requires_backends(cls , ["flax", "transformers"] ) @classmethod def UpperCAmelCase_ ( cls: str , *__lowerCamelCase: List[str] , **__lowerCamelCase: str ): '''simple docstring''' requires_backends(cls , ["flax", "transformers"] ) class _a ( metaclass=UpperCamelCase__): """simple docstring""" UpperCamelCase__ = ["""flax""", """transformers"""] def __init__( self: List[Any] , *__lowerCamelCase: Optional[int] , **__lowerCamelCase: Union[str, Any] ): '''simple docstring''' requires_backends(self , ["flax", "transformers"] ) @classmethod def UpperCAmelCase_ ( cls: Optional[Any] , *__lowerCamelCase: Dict , **__lowerCamelCase: Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["flax", "transformers"] ) @classmethod def UpperCAmelCase_ ( cls: Any , *__lowerCamelCase: Dict , **__lowerCamelCase: str ): '''simple docstring''' requires_backends(cls , ["flax", "transformers"] ) class _a ( metaclass=UpperCamelCase__): """simple docstring""" UpperCamelCase__ = ["""flax""", """transformers"""] def __init__( self: Any , *__lowerCamelCase: Any , **__lowerCamelCase: List[Any] ): '''simple docstring''' requires_backends(self , ["flax", "transformers"] ) @classmethod def UpperCAmelCase_ ( cls: List[Any] , *__lowerCamelCase: Optional[int] , **__lowerCamelCase: Any ): '''simple docstring''' requires_backends(cls , ["flax", "transformers"] ) @classmethod def UpperCAmelCase_ ( cls: Tuple , *__lowerCamelCase: int , **__lowerCamelCase: Any ): '''simple docstring''' requires_backends(cls , ["flax", "transformers"] )
149
1
from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int=None , __UpperCAmelCase : Tuple=None ) -> Optional[int]: if attention_mask is None: SCREAMING_SNAKE_CASE_ = tf.cast(tf.math.not_equal(__UpperCAmelCase , config.pad_token_id ) , tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class lowerCamelCase_ : '''simple docstring''' lowercase_ = OPTConfig lowercase_ = {} lowercase_ = "gelu" def __init__( self : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple=13 , _lowerCAmelCase : int=7 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Any=False , _lowerCAmelCase : List[str]=99 , _lowerCAmelCase : Tuple=16 , _lowerCAmelCase : Dict=2 , _lowerCAmelCase : str=4 , _lowerCAmelCase : Union[str, Any]=4 , _lowerCAmelCase : Any="gelu" , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : List[str]=0.1 , _lowerCAmelCase : Optional[int]=20 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : Dict=1 , _lowerCAmelCase : List[str]=0 , _lowerCAmelCase : int=16 , _lowerCAmelCase : List[str]=16 , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = seq_length SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = eos_token_id SCREAMING_SNAKE_CASE_ = pad_token_id SCREAMING_SNAKE_CASE_ = bos_token_id SCREAMING_SNAKE_CASE_ = embed_dim SCREAMING_SNAKE_CASE_ = word_embed_proj_dim SCREAMING_SNAKE_CASE_ = False def lowerCAmelCase_ ( self : List[str] ): SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) SCREAMING_SNAKE_CASE_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) SCREAMING_SNAKE_CASE_ = tf.concat([input_ids, eos_tensor] , axis=1 ) SCREAMING_SNAKE_CASE_ = self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=_lowerCAmelCase , **self.config_updates , ) SCREAMING_SNAKE_CASE_ = prepare_opt_inputs_dict(_lowerCAmelCase , _lowerCAmelCase ) return config, inputs_dict def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] ): SCREAMING_SNAKE_CASE_ = TFOPTModel(config=_lowerCAmelCase ) 
SCREAMING_SNAKE_CASE_ = inputs_dict['input_ids'] SCREAMING_SNAKE_CASE_ = input_ids[:1, :] SCREAMING_SNAKE_CASE_ = inputs_dict['attention_mask'][:1, :] SCREAMING_SNAKE_CASE_ = 1 # first forward pass SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) SCREAMING_SNAKE_CASE_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and SCREAMING_SNAKE_CASE_ = tf.concat([input_ids, next_tokens] , axis=-1 ) SCREAMING_SNAKE_CASE_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0] SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice SCREAMING_SNAKE_CASE_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) SCREAMING_SNAKE_CASE_ = output_from_no_past[:, -3:, random_slice_idx] SCREAMING_SNAKE_CASE_ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_lowerCAmelCase , _lowerCAmelCase , rtol=1E-3 ) @require_tf class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowercase_ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () lowercase_ = (TFOPTForCausalLM,) if is_tf_available() else () lowercase_ = ( {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = 10 def lowerCAmelCase_ ( self : str ): SCREAMING_SNAKE_CASE_ = TFOPTModelTester(self ) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_lowerCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : int ): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_lowerCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(_lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple ): if hasattr(_lowerCAmelCase , 'weight' ): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. model.build() if hasattr(_lowerCAmelCase , 'weight' ): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10]: # build the embeddings SCREAMING_SNAKE_CASE_ = model_class(config=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = _get_word_embedding_weight(_lowerCAmelCase , model.get_input_embeddings() ) SCREAMING_SNAKE_CASE_ = _get_word_embedding_weight(_lowerCAmelCase , model.get_output_embeddings() ) # reshape the embeddings model.resize_token_embeddings(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = _get_word_embedding_weight(_lowerCAmelCase , model.get_input_embeddings() ) SCREAMING_SNAKE_CASE_ = _get_word_embedding_weight(_lowerCAmelCase , model.get_output_embeddings() ) # check that the resized embeddings size matches the desired size. 
SCREAMING_SNAKE_CASE_ = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] , _lowerCAmelCase ) # check that weights remain the same after resizing SCREAMING_SNAKE_CASE_ = True for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: SCREAMING_SNAKE_CASE_ = False self.assertTrue(_lowerCAmelCase ) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] , _lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = True for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: SCREAMING_SNAKE_CASE_ = False self.assertTrue(_lowerCAmelCase ) def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] ) -> List[Any]: return tf.constant(__UpperCAmelCase , dtype=tf.intaa ) @require_tf class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' lowercase_ = 99 def lowerCAmelCase_ ( self : List[str] ): SCREAMING_SNAKE_CASE_ = tf.ones((4, 1) , dtype=tf.intaa ) * 2 SCREAMING_SNAKE_CASE_ = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 ) SCREAMING_SNAKE_CASE_ = input_ids.shape[0] SCREAMING_SNAKE_CASE_ = OPTConfig( vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size @require_sentencepiece @require_tf class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self : Any ): SCREAMING_SNAKE_CASE_ = TFOPTModel.from_pretrained('facebook/opt-350m' ) SCREAMING_SNAKE_CASE_ = _long_tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] ) SCREAMING_SNAKE_CASE_ = tf.not_equal(_lowerCAmelCase , model.config.pad_token_id ) with tf.GradientTape(): SCREAMING_SNAKE_CASE_ = model(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase ).last_hidden_state SCREAMING_SNAKE_CASE_ = (1, 11, 512) self.assertEqual(output.shape , _lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = tf.constant( [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] ) self.assertTrue(np.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=4E-3 ) ) SCREAMING_SNAKE_CASE_ = tf.function(_lowerCAmelCase , jit_compile=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = xla_generate(_lowerCAmelCase , _lowerCAmelCase )[0] self.assertTrue(np.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=4E-2 ) ) @require_tf @slow class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : List[str] ): super().setUp() SCREAMING_SNAKE_CASE_ = 'facebook/opt-350m' def lowerCAmelCase_ ( self : Any ): SCREAMING_SNAKE_CASE_ = TFOPTForCausalLM.from_pretrained(self.path_model ) SCREAMING_SNAKE_CASE_ = GPTaTokenizer.from_pretrained(self.path_model ) SCREAMING_SNAKE_CASE_ = [ 'Today is a beautiful day and I want to', 'In the city of', 'Paris is the capital of France and', 'Computers and mobile phones have taken', ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False SCREAMING_SNAKE_CASE_ = tokenizer(_lowerCAmelCase , return_tensors='tf' , padding=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) SCREAMING_SNAKE_CASE_ = tf.constant( [ [1.3851, -13.8923, 
-10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670], [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822], [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703], [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477], ] ) self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-4 ) ) SCREAMING_SNAKE_CASE_ = tf.function(_lowerCAmelCase , jit_compile=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-4 ) ) @require_tf @slow class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' @property def lowerCAmelCase_ ( self : Tuple ): return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def lowerCAmelCase_ ( self : Dict ): SCREAMING_SNAKE_CASE_ = 'facebook/opt-125m' SCREAMING_SNAKE_CASE_ = [ 'Today is a beautiful day and I want to', 'In the city of New York, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = GPTaTokenizer.from_pretrained(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = TFOPTForCausalLM.from_pretrained(_lowerCAmelCase ) for prompt in self.prompts: SCREAMING_SNAKE_CASE_ = tokenizer(_lowerCAmelCase , return_tensors='tf' ).input_ids SCREAMING_SNAKE_CASE_ = model.generate(_lowerCAmelCase , max_length=10 ) SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase ) predicted_outputs += generated_string self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): SCREAMING_SNAKE_CASE_ = 'facebook/opt-350m' SCREAMING_SNAKE_CASE_ = GPTaTokenizer.from_pretrained(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = TFOPTForCausalLM.from_pretrained(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = 'left' # use different length sentences to test batching SCREAMING_SNAKE_CASE_ = [ 'Hello, my dog is a little', 'Today, I', ] SCREAMING_SNAKE_CASE_ = tokenizer(_lowerCAmelCase , return_tensors='tf' , padding=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = inputs['input_ids'] SCREAMING_SNAKE_CASE_ = model.generate(input_ids=_lowerCAmelCase , attention_mask=inputs['attention_mask'] ) SCREAMING_SNAKE_CASE_ = tokenizer(sentences[0] , return_tensors='tf' ).input_ids SCREAMING_SNAKE_CASE_ = model.generate(input_ids=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs['attention_mask'][-1] , tf.intaa ) ) SCREAMING_SNAKE_CASE_ = tokenizer(sentences[1] , return_tensors='tf' ).input_ids SCREAMING_SNAKE_CASE_ = model.generate(input_ids=_lowerCAmelCase , max_length=model.config.max_length - num_paddings ) SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = tokenizer.decode(output_padded[0] , skip_special_tokens=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = [ 'Hello, my dog is a little bit of a dork.\nI\'m a little bit', 'Today, I was in the middle of a conversation with a friend about the', ] self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , [non_padded_sentence, padded_sentence] ) def lowerCAmelCase_ ( 
self : Tuple ): SCREAMING_SNAKE_CASE_ = 'facebook/opt-350m' SCREAMING_SNAKE_CASE_ = [ 'Today is a beautiful day and I want to', 'In the city of San Francisco, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = GPTaTokenizer.from_pretrained(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = TFOPTForCausalLM.from_pretrained(_lowerCAmelCase ) for prompt in self.prompts: SCREAMING_SNAKE_CASE_ = tokenizer(_lowerCAmelCase , return_tensors='tf' ).input_ids SCREAMING_SNAKE_CASE_ = model.generate(_lowerCAmelCase , max_length=10 ) SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase ) predicted_outputs += generated_string self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
354
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    """simple docstring"""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
210
0
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    '''simple docstring'''
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    '''simple docstring'''
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    '''simple docstring'''
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
232
from torch import nn


class _lowercase ( nn.Module ):
    # Parameter and attribute names restored from the right-hand sides below;
    # the flattened source collapsed both __init__ parameters to one
    # placeholder (a syntax error) and dropped the `self.` targets that the
    # forward pass relies on. `forward` is the nn.Module hook name.
    def __init__(self, class_size, embed_size):
        """simple docstring"""
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        """simple docstring"""
        logits = self.mlp(hidden_state)
        return logits
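A quick smoke test for the head above; the sizes here are arbitrary:

import torch

head = _lowercase(class_size=5, embed_size=768)
logits = head(torch.randn(2, 768))  # (batch, embed_size) -> (batch, class_size)
assert logits.shape == (2, 5)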
175
0
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """simple docstring"""
    # Names restored from the recursive calls and the body below; the
    # flattened source gave both functions in this file the same placeholder
    # name and collapsed all parameters to one (a syntax error).
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """simple docstring"""
    # `height` (the size of the exponent tower) is an assumed name; it is not
    # recoverable from the flattened source.
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f'{solution() = }')
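As a sanity check, _modexpt should agree with Python's built-in three-argument pow, which computes the same modular exponentiation:

for b, e, m in [(3, 7, 10), (2, 100, 97), (1777, 1855, 10**8)]:
    assert _modexpt(b, e, m) == pow(b, e, m)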
273
from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def lowerCAmelCase_ ( __a ) -> int: """simple docstring""" return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) __A = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n" class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' @staticmethod def SCREAMING_SNAKE_CASE_ (UpperCAmelCase_ : ArgumentParser) ->str: '''simple docstring''' lowerCamelCase__: Dict =parser.add_parser( "convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , ) train_parser.add_argument("--model_type" , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Model's type.") train_parser.add_argument( "--tf_checkpoint" , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="TensorFlow checkpoint path or folder.") train_parser.add_argument( "--pytorch_dump_output" , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to the PyTorch saved model output.") train_parser.add_argument("--config" , type=UpperCAmelCase_ , default="" , help="Configuration file path or folder.") train_parser.add_argument( "--finetuning_task_name" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help="Optional fine-tuning task name if the TF model was a finetuned model." , ) train_parser.set_defaults(func=UpperCAmelCase_) def __init__(self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str , *UpperCAmelCase_ : Optional[int] , ) ->List[str]: '''simple docstring''' lowerCamelCase__: Dict =logging.get_logger("transformers-cli/converting") self._logger.info(F"""Loading model {model_type}""") lowerCamelCase__: Any =model_type lowerCamelCase__: Optional[int] =tf_checkpoint lowerCamelCase__: Any =pytorch_dump_output lowerCamelCase__: Union[str, Any] =config lowerCamelCase__: str =finetuning_task_name def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Tuple: '''simple docstring''' if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(UpperCAmelCase_) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(UpperCAmelCase_) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(UpperCAmelCase_) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(UpperCAmelCase_) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "gpt": from 
..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(UpperCAmelCase_) if "ckpt" in self._tf_checkpoint.lower(): lowerCamelCase__: Tuple =self._tf_checkpoint lowerCamelCase__: List[str] ="" else: lowerCamelCase__: Any =self._tf_checkpoint lowerCamelCase__: Dict ="" convert_transfo_xl_checkpoint_to_pytorch( UpperCAmelCase_ , self._config , self._pytorch_dump_output , UpperCAmelCase_) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(UpperCAmelCase_) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(UpperCAmelCase_) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) else: raise ValueError( "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]")
273
1
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) UpperCAmelCase_ : List[Any] = _symbol_database.Default() UpperCAmelCase_ : Optional[Any] = _descriptor_pool.Default().AddSerializedFile( B'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' ) UpperCAmelCase_ : str = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS is False: UpperCAmelCase_ : Union[str, Any] = None UpperCAmelCase_ : int = B'H\003' # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" UpperCAmelCase_ : Optional[int] = 45 UpperCAmelCase_ : int = 1581 UpperCAmelCase_ : List[Any] = 1517 UpperCAmelCase_ : Optional[int] = 1570 UpperCAmelCase_ : Any = 1584 UpperCAmelCase_ : Union[str, Any] = 1793 UpperCAmelCase_ : List[str] = 1795 UpperCAmelCase_ : str = 1916 UpperCAmelCase_ : Any = 1864 UpperCAmelCase_ : str = 1905 UpperCAmelCase_ : Dict = 1919 UpperCAmelCase_ : str = 2429 UpperCAmelCase_ : Union[str, Any] = 2208 UpperCAmelCase_ : Union[str, Any] = 2418 UpperCAmelCase_ : Union[str, Any] = 2323 UpperCAmelCase_ : Union[str, Any] = 2407 # @@protoc_insertion_point(module_scope)
200
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class lowercase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self , __snake_case , __snake_case=7 , __snake_case=3 , __snake_case=18 , __snake_case=30 , __snake_case=400 , __snake_case=True , __snake_case=None , __snake_case=True , __snake_case=None , ): _SCREAMING_SNAKE_CASE : Any = size if size is not None else {"""shortest_edge""": 20} _SCREAMING_SNAKE_CASE : int = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _SCREAMING_SNAKE_CASE : Dict = parent _SCREAMING_SNAKE_CASE : Any = batch_size _SCREAMING_SNAKE_CASE : Any = num_channels _SCREAMING_SNAKE_CASE : Dict = image_size _SCREAMING_SNAKE_CASE : Any = min_resolution _SCREAMING_SNAKE_CASE : Any = max_resolution _SCREAMING_SNAKE_CASE : Union[str, Any] = do_resize _SCREAMING_SNAKE_CASE : Tuple = size _SCREAMING_SNAKE_CASE : Optional[int] = do_center_crop _SCREAMING_SNAKE_CASE : str = crop_size def UpperCAmelCase_ ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class lowercase__ ( _snake_case , unittest.TestCase ): '''simple docstring''' A_ : Union[str, Any] = MobileNetVaImageProcessor if is_vision_available() else None def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE : int = MobileNetVaImageProcessingTester(self ) @property def UpperCAmelCase_ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__snake_case , """do_resize""" ) ) self.assertTrue(hasattr(__snake_case , """size""" ) ) self.assertTrue(hasattr(__snake_case , """do_center_crop""" ) ) self.assertTrue(hasattr(__snake_case , """crop_size""" ) ) def UpperCAmelCase_ ( self ): _SCREAMING_SNAKE_CASE : str = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 20} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) _SCREAMING_SNAKE_CASE : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def UpperCAmelCase_ ( self ): pass def UpperCAmelCase_ ( self ): # Initialize image_processing _SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case ) for image in image_inputs: self.assertIsInstance(__snake_case , Image.Image ) # Test not batched input _SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched 
_SCREAMING_SNAKE_CASE : int = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCAmelCase_ ( self ): # Initialize image_processing _SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _SCREAMING_SNAKE_CASE : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case ) for image in image_inputs: self.assertIsInstance(__snake_case , np.ndarray ) # Test not batched input _SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCAmelCase_ ( self ): # Initialize image_processing _SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _SCREAMING_SNAKE_CASE : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case ) for image in image_inputs: self.assertIsInstance(__snake_case , torch.Tensor ) # Test not batched input _SCREAMING_SNAKE_CASE : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _SCREAMING_SNAKE_CASE : str = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
200
1
'''simple docstring'''
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # restored as a key assignment on _import_structure (referenced at the
    # bottom of the file); the flattened source had dropped the dict target
    _import_structure['modeling_mra'] = [
        'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MraForMaskedLM',
        'MraForMultipleChoice',
        'MraForQuestionAnswering',
        'MraForSequenceClassification',
        'MraForTokenClassification',
        'MraLayer',
        'MraModel',
        'MraPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )

else:
    import sys

    # restored target: transformers replaces the module object in sys.modules
    # with the lazy proxy; the flattened source had lost the assignment target
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
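_LazyModule defers the heavy torch-backed imports until an exported name is first touched. A minimal stdlib-only sketch of that pattern; this illustrates the idea and is not transformers' actual implementation:

import importlib
import types


class LazyModule(types.ModuleType):  # illustrative stand-in for _LazyModule
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported name back to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(attr)
        # the real import only happens here, on first attribute access
        module = importlib.import_module('.' + self._name_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache: later lookups bypass __getattr__
        return value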
351
'''simple docstring'''
from __future__ import annotations

import os
from collections.abc import Mapping

EdgeT = tuple[int, int]  # alias name restored from the annotations below


class Graph:
    # Class and method names restored from their call sites in solution();
    # the flattened source collapsed them all to shared placeholders.
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    '''simple docstring'''
    # `__file__` restored in place of the parameter placeholder: the script
    # resolves the data file relative to its own directory.
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(network_file) as f:
        data = f.read().strip().split('\n')
    adjacency_matrix = [line.split(',') for line in data]

    # distinct loop variables restored; the flattened source mangled both to
    # the same name, and the assignment into `edges` had lost its target
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total


if __name__ == "__main__":
    print(F'''{solution() = }''')
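The MST routine can be exercised without the Project Euler input file; a small hand-checked example (the graph is invented for illustration):

g = Graph({0, 1, 2}, {(0, 1): 5, (1, 2): 3, (0, 2): 1})
mst = g.prims_algorithm()
assert sum(mst.edges.values()) == 4  # keeps (0, 2) and (1, 2), drops (0, 1)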
222
0
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase = DiTPipeline __lowerCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS __lowerCamelCase = PipelineTesterMixin.required_optional_params - { '''latents''', '''num_images_per_prompt''', '''callback''', '''callback_steps''', } __lowerCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS __lowerCamelCase = False def snake_case ( self ): """simple docstring""" torch.manual_seed(0 ) _lowerCAmelCase = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_snake_case , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=_snake_case , ) _lowerCAmelCase = AutoencoderKL() _lowerCAmelCase = DDIMScheduler() _lowerCAmelCase = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler} return components def snake_case ( self , _snake_case , _snake_case=0 ): """simple docstring""" if str(_snake_case ).startswith("""mps""" ): _lowerCAmelCase = torch.manual_seed(_snake_case ) else: _lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) _lowerCAmelCase = { """class_labels""": [1], """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def snake_case ( self ): """simple docstring""" _lowerCAmelCase = """cpu""" _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = self.pipeline_class(**_snake_case ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) _lowerCAmelCase = self.get_dummy_inputs(_snake_case ) _lowerCAmelCase = pipe(**_snake_case ).images _lowerCAmelCase = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) _lowerCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) _lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_snake_case , 1e-3 ) def snake_case ( self ): """simple docstring""" self._test_inference_batch_single_identical(relax_max_difference=_snake_case , expected_max_diff=1e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def snake_case ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @require_torch_gpu @slow class __lowerCAmelCase ( unittest.TestCase ): def snake_case ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self ): """simple docstring""" _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) _lowerCAmelCase = ["""vase""", """umbrella""", """white shark""", """white wolf"""] _lowerCAmelCase = pipe.get_label_ids(_snake_case ) 
_lowerCAmelCase = pipe(_snake_case , generator=_snake_case , num_inference_steps=40 , output_type="""np""" ).images for word, image in zip(_snake_case , _snake_case ): _lowerCAmelCase = load_numpy( F'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' ) assert np.abs((expected_image - image).max() ) < 1e-2 def snake_case ( self ): """simple docstring""" _lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) _lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) _lowerCAmelCase = ["""vase""", """umbrella"""] _lowerCAmelCase = pipe.get_label_ids(_snake_case ) _lowerCAmelCase = torch.manual_seed(0 ) _lowerCAmelCase = pipe(_snake_case , generator=_snake_case , num_inference_steps=25 , output_type="""np""" ).images for word, image in zip(_snake_case , _snake_case ): _lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" F'/dit/{word}_512.npy' ) assert np.abs((expected_image - image).max() ) < 1e-1
82
import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version UpperCAmelCase__ = version.parse(importlib_metadata.version("nltk")) if NLTK_VERSION >= version.Version("3.6.4"): from nltk import word_tokenize UpperCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n" UpperCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n" UpperCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. 
default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def _lowerCamelCase ( self : List[Any]) -> List[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence'), 'references': datasets.Value('string' , id='sequence'), }) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[ 'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score', 'https://en.wikipedia.org/wiki/METEOR', ] , ) def _lowerCamelCase ( self : Optional[Any] , A : List[str]) -> List[Any]: """simple docstring""" import nltk nltk.download('wordnet') if NLTK_VERSION >= version.Version('3.6.5'): nltk.download('punkt') if NLTK_VERSION >= version.Version('3.6.6'): nltk.download('omw-1.4') def _lowerCamelCase ( self : Optional[Any] , A : Tuple , A : Optional[int] , A : List[Any]=0.9 , A : Optional[Any]=3 , A : Optional[int]=0.5) -> Any: """simple docstring""" if NLTK_VERSION >= version.Version('3.6.5'): _UpperCAmelCase = [ meteor_score.single_meteor_score( word_tokenize(A) , word_tokenize(A) , alpha=A , beta=A , gamma=A) for ref, pred in zip(A , A) ] else: _UpperCAmelCase = [ meteor_score.single_meteor_score(A , A , alpha=A , beta=A , gamma=A) for ref, pred in zip(A , A) ] return {"meteor": np.mean(A)}
339
0
'''simple docstring''' import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class __lowerCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any]=13 , SCREAMING_SNAKE_CASE : Any=7 , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : List[Any]=True , SCREAMING_SNAKE_CASE : List[str]=99 , SCREAMING_SNAKE_CASE : Tuple=32 , SCREAMING_SNAKE_CASE : int=5 , SCREAMING_SNAKE_CASE : Any=4 , SCREAMING_SNAKE_CASE : int=37 , SCREAMING_SNAKE_CASE : Any="gelu" , SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Any=512 , SCREAMING_SNAKE_CASE : Union[str, Any]=16 , SCREAMING_SNAKE_CASE : List[str]=2 , SCREAMING_SNAKE_CASE : str=0.02 , SCREAMING_SNAKE_CASE : List[Any]=4 , ): _A : Optional[int] = parent _A : Union[str, Any] = batch_size _A : Tuple = seq_length _A : Union[str, Any] = is_training _A : Union[str, Any] = use_attention_mask _A : List[Any] = use_token_type_ids _A : int = use_labels _A : Optional[Any] = vocab_size _A : Tuple = hidden_size _A : Optional[Any] = num_hidden_layers _A : List[str] = num_attention_heads _A : str = intermediate_size _A : Dict = hidden_act _A : Any = hidden_dropout_prob _A : int = attention_probs_dropout_prob _A : Union[str, Any] = max_position_embeddings _A : Dict = type_vocab_size _A : Union[str, Any] = type_sequence_label_size _A : Union[str, Any] = initializer_range _A : Union[str, Any] = num_choices def A ( self : str): _A : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _A : List[str] = None if self.use_attention_mask: _A : Any = random_attention_mask([self.batch_size, self.seq_length]) _A : List[str] = None if self.use_token_type_ids: _A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _A : Dict = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def A ( self : Dict): _A : List[Any] = self.prepare_config_and_inputs() _A , _A , _A , _A : Union[str, Any] = config_and_inputs _A : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict def A ( self : List[Any]): _A : Dict = self.prepare_config_and_inputs() _A , _A , _A , _A : List[Any] = config_and_inputs _A : List[Any] = True _A : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) _A : 
Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class __lowerCamelCase ( a_ , unittest.TestCase ): """simple docstring""" a = True a = ( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertForQuestionAnswering, ) if is_flax_available() else () ) def A ( self : Optional[Any]): _A : Optional[int] = FlaxBertModelTester(self) @slow def A ( self : List[Any]): # Only check this for base model, not necessary for all model classes. # This will also help speed-up tests. _A : Optional[int] = FlaxBertModel.from_pretrained('bert-base-cased') _A : Optional[Any] = model(np.ones((1, 1))) self.assertIsNotNone(SCREAMING_SNAKE_CASE)
227
'''simple docstring''' import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __lowerCamelCase ( a_ , unittest.TestCase ): """simple docstring""" a = KandinskyVaaPriorPipeline a = ["prompt"] a = ["prompt", "negative_prompt"] a = [ "num_images_per_prompt", "generator", "num_inference_steps", "latents", "negative_prompt", "guidance_scale", "output_type", "return_dict", ] a = False @property def A ( self : List[str]): return 32 @property def A ( self : List[Any]): return 32 @property def A ( self : Dict): return self.time_input_dim @property def A ( self : Tuple): return self.time_input_dim * 4 @property def A ( self : Optional[int]): return 100 @property def A ( self : Dict): _A : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') return tokenizer @property def A ( self : Optional[Any]): torch.manual_seed(0) _A : Any = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE) @property def A ( self : List[Any]): torch.manual_seed(0) _A : Optional[Any] = { 'num_attention_heads': 2, 'attention_head_dim': 12, 'embedding_dim': self.text_embedder_hidden_size, 'num_layers': 1, } _A : Any = PriorTransformer(**SCREAMING_SNAKE_CASE) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 _A : str = nn.Parameter(torch.ones(model.clip_std.shape)) return model @property def A ( self : List[str]): torch.manual_seed(0) _A : List[Any] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) _A : Union[str, Any] = CLIPVisionModelWithProjection(SCREAMING_SNAKE_CASE) return model @property def A ( self : int): _A : Optional[Any] = CLIPImageProcessor( crop_size=224 , do_center_crop=SCREAMING_SNAKE_CASE , do_normalize=SCREAMING_SNAKE_CASE , do_resize=SCREAMING_SNAKE_CASE , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , ) return image_processor def A ( self : Optional[Any]): _A : Optional[int] = self.dummy_prior _A : Dict = self.dummy_image_encoder _A : Dict = self.dummy_text_encoder _A : str = self.dummy_tokenizer _A : Optional[Any] = self.dummy_image_processor _A : Optional[Any] = UnCLIPScheduler( variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=SCREAMING_SNAKE_CASE , clip_sample_range=10.0 , ) _A : Dict = { 'prior': prior, 'image_encoder': image_encoder, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'scheduler': scheduler, 'image_processor': image_processor, } return components def A ( self : Tuple , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : 
List[str]=0): if str(SCREAMING_SNAKE_CASE).startswith('mps'): _A : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE) else: _A : int = torch.Generator(device=SCREAMING_SNAKE_CASE).manual_seed(SCREAMING_SNAKE_CASE) _A : List[Any] = { 'prompt': 'horse', 'generator': generator, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def A ( self : List[Any]): _A : str = 'cpu' _A : Tuple = self.get_dummy_components() _A : List[Any] = self.pipeline_class(**SCREAMING_SNAKE_CASE) _A : Any = pipe.to(SCREAMING_SNAKE_CASE) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE) _A : Dict = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE)) _A : str = output.image_embeds _A : Optional[int] = pipe( **self.get_dummy_inputs(SCREAMING_SNAKE_CASE) , return_dict=SCREAMING_SNAKE_CASE , )[0] _A : Optional[int] = image[0, -10:] _A : int = image_from_tuple[0, -10:] assert image.shape == (1, 32) _A : Dict = np.array( [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def A ( self : Any): _A : Tuple = torch_device == 'cpu' _A : Optional[int] = True _A : Tuple = False self._test_inference_batch_single_identical( test_max_difference=SCREAMING_SNAKE_CASE , relax_max_difference=SCREAMING_SNAKE_CASE , test_mean_pixel_difference=SCREAMING_SNAKE_CASE , ) @skip_mps def A ( self : int): _A : Tuple = torch_device == 'cpu' _A : Optional[Any] = False self._test_attention_slicing_forward_pass( test_max_difference=SCREAMING_SNAKE_CASE , test_mean_pixel_difference=SCREAMING_SNAKE_CASE , )
227
1
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler lowercase_ = 1_6 lowercase_ = 3_2 def lowercase ( lowerCAmelCase__ : Accelerator , lowerCAmelCase__ : int = 16 , lowerCAmelCase__ : str = "bert-base-cased" ) -> List[str]: __a = AutoTokenizer.from_pretrained(lowerCAmelCase__ ) __a = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(lowerCAmelCase__ : Any ): # max_length=None => use the model max length (it's actually the default) __a = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __a = datasets.map( lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=lowerCAmelCase__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __a = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(lowerCAmelCase__ : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCAmelCase__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(lowerCAmelCase__ , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. 
__a = DataLoader( tokenized_datasets['''train'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ ) __a = DataLoader( tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ ) return train_dataloader, eval_dataloader def lowercase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any ) -> List[str]: # Initialize accelerator __a = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __a = config['''lr'''] __a = int(config['''num_epochs'''] ) __a = int(config['''seed'''] ) __a = int(config['''batch_size'''] ) __a = args.model_name_or_path set_seed(lowerCAmelCase__ ) __a , __a = get_dataloaders(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __a = AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase__ , return_dict=lowerCAmelCase__ ) # Instantiate optimizer __a = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __a = optimizer_cls(params=model.parameters() , lr=lowerCAmelCase__ ) if accelerator.state.deepspeed_plugin is not None: __a = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: __a = 1 __a = (len(lowerCAmelCase__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __a = get_linear_schedule_with_warmup( optimizer=lowerCAmelCase__ , num_warmup_steps=0 , num_training_steps=lowerCAmelCase__ , ) else: __a = DummyScheduler(lowerCAmelCase__ , total_num_steps=lowerCAmelCase__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __a , __a , __a , __a , __a = accelerator.prepare( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # We need to keep track of how many total steps we have iterated over __a = 0 # We also need to keep track of the stating epoch so files are named properly __a = 0 # Now we train the model __a = evaluate.load('''glue''' , '''mrpc''' ) __a = 0 __a = {} for epoch in range(lowerCAmelCase__ , lowerCAmelCase__ ): model.train() for step, batch in enumerate(lowerCAmelCase__ ): __a = model(**lowerCAmelCase__ ) __a = outputs.loss __a = loss / gradient_accumulation_steps accelerator.backward(lowerCAmelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() __a = 0 for step, batch in enumerate(lowerCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): __a = model(**lowerCAmelCase__ ) __a = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times __a , __a = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(lowerCAmelCase__ ) - 1: __a = predictions[: len(eval_dataloader.dataset ) - samples_seen] __a = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , ) __a = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , lowerCAmelCase__ ) __a = eval_metric['''accuracy'''] if best_performance < eval_metric["accuracy"]: __a = eval_metric['''accuracy'''] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}''' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f: json.dump(lowerCAmelCase__ , lowerCAmelCase__ ) def lowercase ( ) -> int: __a = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=lowerCAmelCase__ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=lowerCAmelCase__ , ) parser.add_argument( '''--output_dir''' , type=lowerCAmelCase__ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , ) parser.add_argument( '''--performance_lower_bound''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' , ) parser.add_argument( '''--num_epochs''' , type=lowerCAmelCase__ , default=3 , help='''Number of train epochs.''' , ) __a = parser.parse_args() __a = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(lowerCAmelCase__ , lowerCAmelCase__ ) if __name__ == "__main__": main()
45
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    # Names restored from the call site and body, or chosen descriptively;
    # the flattened source reused one placeholder for both functions and for
    # every parameter, and the writes into cst_fwd/parent had lost their
    # subscript targets.
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


# Dictionary names chosen descriptively: both were assigned to the same
# placeholder in the flattened source, so the second overwrote the first.
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
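On the sample graphs above, the shortest E-to-F distance is 3 (E -> G -> F, beating E -> B -> C -> D -> F at cost 4):

print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3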
270
0
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device lowerCAmelCase : List[Any] = False class __lowercase ( unittest.TestCase ): """simple docstring""" pass @nightly @require_torch_gpu class __lowercase ( unittest.TestCase ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : Tuple): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : int): SCREAMING_SNAKE_CASE_: str = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa) pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Dict = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg") SCREAMING_SNAKE_CASE_: Tuple = torch.manual_seed(0) SCREAMING_SNAKE_CASE_: str = pipe.dual_guided( prompt="first prompt" , image=lowerCAmelCase__ , text_to_image_strength=0.75 , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[int] = VersatileDiffusionPipeline.from_pretrained(lowerCAmelCase__ , torch_dtype=torch.floataa) pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = generator.manual_seed(0) SCREAMING_SNAKE_CASE_: List[str] = pipe.dual_guided( prompt="first prompt" , image=lowerCAmelCase__ , text_to_image_strength=0.75 , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images assert np.abs(image - new_image).sum() < 1E-5, "Models don't have the same forward pass" def _SCREAMING_SNAKE_CASE ( self : List[Any]): SCREAMING_SNAKE_CASE_: int = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa) pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = "cyberpunk 2077" SCREAMING_SNAKE_CASE_: Dict = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg") SCREAMING_SNAKE_CASE_: Dict = torch.manual_seed(0) SCREAMING_SNAKE_CASE_: Dict = pipe.dual_guided( prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , text_to_image_strength=0.75 , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images SCREAMING_SNAKE_CASE_: Tuple = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_: Dict = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1 SCREAMING_SNAKE_CASE_: List[str] = "A painting of a squirrel eating a burger " SCREAMING_SNAKE_CASE_: str = torch.manual_seed(0) SCREAMING_SNAKE_CASE_: Tuple = pipe.text_to_image( prompt=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy").images SCREAMING_SNAKE_CASE_: Any = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_: Union[str, Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778]) assert np.abs(image_slice.flatten() - 
expected_slice).max() < 1E-1 SCREAMING_SNAKE_CASE_: Optional[Any] = pipe.image_variation(lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type="numpy").images SCREAMING_SNAKE_CASE_: Any = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE_: int = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
365
def solution(max_perimeter: int = 10**9) -> int:
    # Function name restored from the __main__ call below; variable names
    # restored from the statements in the loop that reference them.
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f'''{solution() = }''')
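A quick worked check: with the cap at 100 the loop generates only the perimeters 16 and 50 before exceeding the limit (the next one is 196), so the sum is 66:

assert solution(100) == 66  # 16 + 50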
127
0
"""simple docstring""" import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Tuple = """▁""" SCREAMING_SNAKE_CASE : Union[str, Any] = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""} SCREAMING_SNAKE_CASE : str = { """sentencepiece_model_file""": """sentencepiece.bpe.model""", """vocab_file""": """vocab.txt""", } SCREAMING_SNAKE_CASE : Tuple = { """vocab_file""": { """ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""", """ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""", }, """sentencepiece_model_file""": { """ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""", """ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""", }, } SCREAMING_SNAKE_CASE : Optional[int] = { """ernie-m-base""": 514, """ernie-m-large""": 514, } SCREAMING_SNAKE_CASE : Optional[Any] = { """ernie-m-base""": {"""do_lower_case""": False}, """ernie-m-large""": {"""do_lower_case""": False}, } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =["input_ids"] lowerCamelCase__ =VOCAB_FILES_NAMES lowerCamelCase__ =PRETRAINED_INIT_CONFIGURATION lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ =RESOURCE_FILES_NAMES def __init__(self , a_ , a_=None , a_=False , a_="utf8" , a_="[UNK]" , a_="[SEP]" , a_="[PAD]" , a_="[CLS]" , a_="[MASK]" , a_ = None , **a_ , ): '''simple docstring''' __snake_case : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , vocab_file=a_ , encoding=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , ) __snake_case : str = do_lower_case __snake_case : int = sentencepiece_model_ckpt __snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(a_ ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: __snake_case : int = self.load_vocab(filepath=a_ ) else: __snake_case : Any = {self.sp_model.id_to_piece(a_ ): id for id in range(self.sp_model.get_piece_size() )} __snake_case : Optional[int] = {v: k for k, v in self.vocab.items()} def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if text is None: return None __snake_case : Union[str, Any] = self.tokenize(a_ ) __snake_case , __snake_case : Tuple = '''''', [] for i, ch in enumerate(a_ ): if ch in self.SP_CHAR_MAPPING: __snake_case : List[Any] = self.SP_CHAR_MAPPING.get(a_ ) else: __snake_case : List[Any] = unicodedata.normalize('''NFKC''' , a_ ) if self.is_whitespace(a_ ): continue normalized_text += ch char_mapping.extend([i] * len(a_ ) ) __snake_case , __snake_case , __snake_case : Tuple = normalized_text, [], 0 if self.do_lower_case: __snake_case : List[str] = text.lower() for token in split_tokens: if token[:1] == "▁": __snake_case : int = token[1:] __snake_case : List[str] = text[offset:].index(a_ ) + offset __snake_case : int = start + len(a_ ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) __snake_case : List[Any] = end return 
token_mapping @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return len(self.vocab ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__(self ): '''simple docstring''' __snake_case : str = self.__dict__.copy() __snake_case : Optional[Any] = None return state def __setstate__(self , a_ ): '''simple docstring''' __snake_case : Dict = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __snake_case : List[str] = {} __snake_case : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return "".join((self.SP_CHAR_MAPPING.get(a_ , a_ ) for c in text) ) def SCREAMING_SNAKE_CASE (self , a_ , a_=False , a_=64 , a_=0.1 ): '''simple docstring''' if self.sp_model_kwargs.get('''enable_sampling''' ) is True: __snake_case : Tuple = True if self.sp_model_kwargs.get('''alpha''' ) is not None: __snake_case : Tuple = self.sp_model_kwargs.get('''alpha''' ) if self.sp_model_kwargs.get('''nbest_size''' ) is not None: __snake_case : Any = self.sp_model_kwargs.get('''nbest_size''' ) if not enable_sampling: __snake_case : Optional[Any] = self.sp_model.EncodeAsPieces(a_ ) else: __snake_case : int = self.sp_model.SampleEncodeAsPieces(a_ , a_ , a_ ) __snake_case : Dict = [] for pi, piece in enumerate(a_ ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(a_ ) and pi != 0: new_pieces.append(a_ ) continue else: continue __snake_case : Dict = 0 for i, chunk in enumerate(a_ ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(a_ ) or self.is_punct(a_ ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(a_ ) __snake_case : Optional[int] = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) __snake_case : List[str] = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) __snake_case : Optional[int] = i if len(a_ ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Dict = ''''''.join(a_ ).replace(a_ , ''' ''' ).strip() return out_string def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : List[str] = self.convert_ids_to_tokens(a_ ) __snake_case : Tuple = ''''''.join(a_ ).replace(a_ , ''' ''' ).strip() return out_string def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self.vocab.get(a_ , self.vocab.get(self.unk_token ) ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' return self.reverse_vocab.get(a_ , self.unk_token ) def SCREAMING_SNAKE_CASE (self , a_ , a_=None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __snake_case : Union[str, Any] = [self.cls_token_id] __snake_case : Optional[int] = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def SCREAMING_SNAKE_CASE (self , a_ , a_=None ): '''simple docstring''' if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def SCREAMING_SNAKE_CASE (self , a_ , a_=None , a_=False ): '''simple docstring''' if 
already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(a_ )) + [1, 1] + ([0] * len(a_ )) + [1] return [1] + ([0] * len(a_ )) + [1] def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' if token_ids_a is None: # [CLS] X [SEP] return (len(a_ ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(a_ ) + 1) + [1] * (len(a_ ) + 3) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if "\u4e00" <= char <= "\u9fff": return True return False def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if char in ",;:.?!~,;:。?!《》【】": return True return False def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(a_ ) == 1: __snake_case : Tuple = unicodedata.category(a_ ) if cat == "Zs": return True return False def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : str = {} with io.open(a_ , '''r''' , encoding='''utf-8''' ) as f: for index, line in enumerate(a_ ): __snake_case : Union[str, Any] = line.rstrip('''\n''' ) __snake_case : Dict = int(a_ ) return token_to_idx def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ): '''simple docstring''' __snake_case : Optional[Any] = 0 if os.path.isdir(a_ ): __snake_case : Any = os.path.join( a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) else: __snake_case : Dict = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory with open(a_ , '''w''' , encoding='''utf-8''' ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda a_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ''' Please check that the vocabulary is not corrupted!''' ) __snake_case : Tuple = token_index writer.write(token + '''\n''' ) index += 1 __snake_case : Tuple = os.path.join(a_ , '''sentencepiece.bpe.model''' ) with open(a_ , '''wb''' ) as fi: __snake_case : Union[str, Any] = self.sp_model.serialized_model_proto() fi.write(a_ ) return (vocab_file,)
102
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
68
0
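The tokenizer row above builds a character mapping while normalizing text with NFKC; a minimal, self-contained sketch of that idea follows (names and sample string are my own; the row does this per token and also drops whitespace, which is mimicked here).

import unicodedata


def normalize_with_mapping(text: str) -> tuple[str, list[int]]:
    normalized, char_mapping = "", []
    for i, ch in enumerate(text):
        ch = unicodedata.normalize("NFKC", ch)
        if ch.isspace():
            continue  # whitespace is dropped, so it gets no mapping entry
        normalized += ch
        char_mapping.extend([i] * len(ch))  # one entry per produced character
    return normalized, char_mapping


text = "ﬁsh  ①"  # the ligature expands to two chars, the circled digit to one
norm, mapping = normalize_with_mapping(text)
print(norm)     # 'fish1'
print(mapping)  # [0, 0, 1, 2, 5]: each output char points back to its source index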
from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    # Product term u * (u - 1) * ... used by Newton's forward interpolation.
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
130
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(
    img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int
) -> np.ndarray:
    # Rotate/shear the image by mapping the triangle pt1 onto pt2.
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
130
1
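A non-interactive sketch of the same forward-difference interpolation as the row above, with hard-coded sample points instead of input() (values chosen by me for illustration: y = x^2 sampled at x = 0..3).

import math


def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp *= u - i
    return temp


x = [0.0, 1.0, 2.0, 3.0]
n = 4
y = [[0.0] * n for _ in range(n)]
for i, fx in enumerate([0.0, 1.0, 4.0, 9.0]):  # f(x) = x^2
    y[i][0] = fx

# forward-difference table
for i in range(1, n):
    for j in range(n - i):
        y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

value = 1.5
u = (value - x[0]) / (x[1] - x[0])
summ = y[0][0]
for i in range(1, n):
    summ += ucal(u, i) * y[0][i] / math.factorial(i)

print(summ)  # 2.25, exact because x^2 is a degree-2 polynomial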
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    # Normality = molarity * n-factor.
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    # Ideal gas law solved for pressure, with R = 0.0821 L*atm/(mol*K).
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    # Ideal gas law solved for volume.
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    # Ideal gas law solved for temperature.
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
82
from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar A__ = TypeVar("""T""") A__ = TypeVar("""U""") class __lowerCAmelCase ( Generic[T, U] ): def __init__( self , _snake_case , _snake_case ): """simple docstring""" _lowerCAmelCase = key _lowerCAmelCase = val _lowerCAmelCase = None _lowerCAmelCase = None def __repr__( self ): """simple docstring""" return ( F'Node: key: {self.key}, val: {self.val}, ' F'has next: {bool(self.next )}, has prev: {bool(self.prev )}' ) class __lowerCAmelCase ( Generic[T, U] ): def __init__( self ): """simple docstring""" _lowerCAmelCase = DoubleLinkedListNode(_snake_case , _snake_case ) _lowerCAmelCase = DoubleLinkedListNode(_snake_case , _snake_case ) _lowerCAmelCase , _lowerCAmelCase = self.rear, self.head def __repr__( self ): """simple docstring""" _lowerCAmelCase = ["""DoubleLinkedList"""] _lowerCAmelCase = self.head while node.next is not None: rep.append(str(_snake_case ) ) _lowerCAmelCase = node.next rep.append(str(self.rear ) ) return ",\n ".join(_snake_case ) def snake_case ( self , _snake_case ): """simple docstring""" _lowerCAmelCase = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None _lowerCAmelCase = node _lowerCAmelCase = previous _lowerCAmelCase = node _lowerCAmelCase = self.rear def snake_case ( self , _snake_case ): """simple docstring""" if node.prev is None or node.next is None: return None _lowerCAmelCase = node.next _lowerCAmelCase = node.prev _lowerCAmelCase = None _lowerCAmelCase = None return node class __lowerCAmelCase ( Generic[T, U] ): __lowerCamelCase = {} def __init__( self , _snake_case ): """simple docstring""" _lowerCAmelCase = DoubleLinkedList() _lowerCAmelCase = capacity _lowerCAmelCase = 0 _lowerCAmelCase = 0 _lowerCAmelCase = 0 _lowerCAmelCase = {} def __repr__( self ): """simple docstring""" return ( F'CacheInfo(hits={self.hits}, misses={self.miss}, ' F'capacity={self.capacity}, current size={self.num_keys})' ) def __contains__( self , _snake_case ): """simple docstring""" return key in self.cache def snake_case ( self , _snake_case ): """simple docstring""" if key in self.cache: self.hits += 1 _lowerCAmelCase = self.cache[key] _lowerCAmelCase = self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(_snake_case ) return node.val self.miss += 1 return None def snake_case ( self , _snake_case , _snake_case ): """simple docstring""" if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity _lowerCAmelCase = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(_snake_case ) is not None ) # node guaranteed to be in list assert node.key is not None del self.cache[first_node.key] self.num_keys -= 1 _lowerCAmelCase = DoubleLinkedListNode(_snake_case , _snake_case ) self.list.add(self.cache[key] ) self.num_keys += 1 else: # bump node to the end of the list, update value _lowerCAmelCase = self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list _lowerCAmelCase = value self.list.add(_snake_case ) @classmethod def snake_case ( cls , _snake_case = 128 ): """simple docstring""" def cache_decorator_inner(_snake_case ) -> Callable[..., U]: def cache_decorator_wrapper(*_snake_case ) 
-> U: if func not in cls.decorator_function_to_instance_map: _lowerCAmelCase = LRUCache(_snake_case ) _lowerCAmelCase = cls.decorator_function_to_instance_map[func].get(args[0] ) if result is None: _lowerCAmelCase = func(*_snake_case ) cls.decorator_function_to_instance_map[func].put(args[0] , _snake_case ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(_snake_case , """cache_info""" , _snake_case ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
82
1
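The cache row above implements LRU eviction with a doubly linked list; an equivalent minimal sketch (my own, using collections.OrderedDict) shows the same policy: reads and writes move a key to the back, eviction pops the front.

from collections import OrderedDict


class MiniLRU:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.data: OrderedDict[str, int] = OrderedDict()

    def get(self, key: str) -> int | None:
        if key not in self.data:
            return None
        self.data.move_to_end(key)  # most recently used goes to the back
        return self.data[key]

    def put(self, key: str, value: int) -> None:
        if key in self.data:
            self.data.move_to_end(key)
        self.data[key] = value
        if len(self.data) > self.capacity:
            self.data.popitem(last=False)  # evict least recently used


cache = MiniLRU(2)
cache.put("a", 1)
cache.put("b", 2)
cache.get("a")     # touch "a" so "b" becomes the LRU entry
cache.put("c", 3)  # evicts "b"
print(list(cache.data))  # ['a', 'c']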
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class A__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Optional[int] = StableDiffusionSAGPipeline UpperCamelCase_ : List[str] = TEXT_TO_IMAGE_PARAMS UpperCamelCase_ : List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS UpperCamelCase_ : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS UpperCamelCase_ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS UpperCamelCase_ : Dict = False def _lowerCAmelCase ( self : str ) -> List[str]: """simple docstring""" torch.manual_seed(0 ) _UpperCAmelCase : List[Any] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , ) _UpperCAmelCase : List[str] = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , ) torch.manual_seed(0 ) _UpperCAmelCase : Union[str, Any] = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) _UpperCAmelCase : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) _UpperCAmelCase : List[str] = CLIPTextModel(lowerCAmelCase__ ) _UpperCAmelCase : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _UpperCAmelCase : str = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any]=0 ) -> Any: """simple docstring""" if str(lowerCAmelCase__ ).startswith("mps" ): _UpperCAmelCase : int = torch.manual_seed(lowerCAmelCase__ ) else: _UpperCAmelCase : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) _UpperCAmelCase : Tuple = { "prompt": ".", "generator": generator, "num_inference_steps": 2, "guidance_scale": 1.0, "sag_scale": 1.0, "output_type": "numpy", } return inputs def _lowerCAmelCase ( self : Any ) -> str: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class A__ ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Dict ) -> Optional[int]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self : Dict ) -> List[Any]: """simple docstring""" _UpperCAmelCase : str = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" ) _UpperCAmelCase : Any = 
sag_pipe.to(lowerCAmelCase__ ) sag_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _UpperCAmelCase : int = "." _UpperCAmelCase : List[Any] = torch.manual_seed(0 ) _UpperCAmelCase : List[Any] = sag_pipe( [prompt] , generator=lowerCAmelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type="np" ) _UpperCAmelCase : Tuple = output.images _UpperCAmelCase : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : Optional[Any] = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def _lowerCAmelCase ( self : Any ) -> Dict: """simple docstring""" _UpperCAmelCase : Optional[int] = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" ) _UpperCAmelCase : Dict = sag_pipe.to(lowerCAmelCase__ ) sag_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _UpperCAmelCase : Dict = "." _UpperCAmelCase : Optional[int] = torch.manual_seed(0 ) _UpperCAmelCase : Optional[int] = sag_pipe( [prompt] , generator=lowerCAmelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type="np" ) _UpperCAmelCase : Union[str, Any] = output.images _UpperCAmelCase : int = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : Optional[int] = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def _lowerCAmelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : Union[str, Any] = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" ) _UpperCAmelCase : Any = sag_pipe.to(lowerCAmelCase__ ) sag_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = "." _UpperCAmelCase : Tuple = torch.manual_seed(0 ) _UpperCAmelCase : str = sag_pipe( [prompt] , width=7_6_8 , height=5_1_2 , generator=lowerCAmelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type="np" , ) _UpperCAmelCase : Union[str, Any] = output.images assert image.shape == (1, 5_1_2, 7_6_8, 3)
17
def fizz_buzz(number: int, iterations: int) -> str:
    # Play FizzBuzz starting at `number` until `iterations` is reached.
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
17
1
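Expected behaviour of the FizzBuzz helper above, written out as a tiny self-check (my addition): replay the classic sequence from 1 to 15.

def fizz_buzz_line(number: int) -> str:
    out = ""
    if number % 3 == 0:
        out += "Fizz"
    if number % 5 == 0:
        out += "Buzz"
    if 0 not in (number % 3, number % 5):
        out += str(number)
    return out


expected = "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz"
assert " ".join(fizz_buzz_line(i) for i in range(1, 16)) == expected
print(expected)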
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE :Tuple = { '''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class A_ ( lowerCAmelCase_ ): _lowerCamelCase : List[str] = """sew-d""" def __init__( self : Optional[Any] , snake_case_ : Tuple=3_2 , snake_case_ : Optional[Any]=7_6_8 , snake_case_ : Tuple=1_2 , snake_case_ : Union[str, Any]=1_2 , snake_case_ : Tuple=3_0_7_2 , snake_case_ : Tuple=2 , snake_case_ : int=5_1_2 , snake_case_ : Optional[int]=2_5_6 , snake_case_ : Union[str, Any]=True , snake_case_ : Any=True , snake_case_ : str=("p2c", "c2p") , snake_case_ : Dict="layer_norm" , snake_case_ : str="gelu_python" , snake_case_ : Dict=0.1 , snake_case_ : List[str]=0.1 , snake_case_ : Tuple=0.1 , snake_case_ : Any=0.0 , snake_case_ : Tuple=0.1 , snake_case_ : Union[str, Any]=0.0_2 , snake_case_ : str=1e-7 , snake_case_ : Optional[Any]=1e-5 , snake_case_ : Optional[Any]="group" , snake_case_ : Tuple="gelu" , snake_case_ : Tuple=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case_ : Dict=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , snake_case_ : Dict=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , snake_case_ : int=False , snake_case_ : Union[str, Any]=1_2_8 , snake_case_ : int=1_6 , snake_case_ : Any=True , snake_case_ : Tuple=0.0_5 , snake_case_ : Tuple=1_0 , snake_case_ : Dict=2 , snake_case_ : Tuple=0.0 , snake_case_ : List[Any]=1_0 , snake_case_ : Union[str, Any]=0 , snake_case_ : Any="mean" , snake_case_ : Optional[Any]=False , snake_case_ : Any=False , snake_case_ : Tuple=2_5_6 , snake_case_ : int=0 , snake_case_ : Optional[Any]=1 , snake_case_ : List[str]=2 , **snake_case_ : List[str] , ): super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ ) _UpperCAmelCase = hidden_size _UpperCAmelCase = feat_extract_norm _UpperCAmelCase = feat_extract_activation _UpperCAmelCase = list(snake_case_ ) _UpperCAmelCase = list(snake_case_ ) _UpperCAmelCase = list(snake_case_ ) _UpperCAmelCase = conv_bias _UpperCAmelCase = num_conv_pos_embeddings _UpperCAmelCase = num_conv_pos_embedding_groups _UpperCAmelCase = len(self.conv_dim ) _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = intermediate_size _UpperCAmelCase = squeeze_factor _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = position_buckets _UpperCAmelCase = share_att_key _UpperCAmelCase = relative_attention _UpperCAmelCase = norm_rel_ebd _UpperCAmelCase = list(snake_case_ ) _UpperCAmelCase = hidden_act _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_dropout _UpperCAmelCase = attention_dropout _UpperCAmelCase = activation_dropout _UpperCAmelCase = feat_proj_dropout _UpperCAmelCase = final_dropout _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = feature_layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect." 
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)' f'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _UpperCAmelCase = apply_spec_augment _UpperCAmelCase = mask_time_prob _UpperCAmelCase = mask_time_length _UpperCAmelCase = mask_time_min_masks _UpperCAmelCase = mask_feature_prob _UpperCAmelCase = mask_feature_length _UpperCAmelCase = mask_feature_min_masks # ctc loss _UpperCAmelCase = ctc_loss_reduction _UpperCAmelCase = ctc_zero_infinity # sequence classification _UpperCAmelCase = use_weighted_layer_sum _UpperCAmelCase = classifier_proj_size @property def lowercase ( self : Any ): return functools.reduce(operator.mul , self.conv_stride , 1 )
22
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping _lowerCAmelCase :Tuple = tuple[int, int] class _UpperCAmelCase : '''simple docstring''' def __init__( self , A , A ) -> None: _UpperCAmelCase : set[int] = vertices _UpperCAmelCase : dict[EdgeT, int] = { (min(A ), max(A )): weight for edge, weight in edges.items() } def __lowerCAmelCase ( self , A , A ) -> None: self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) _UpperCAmelCase : List[Any] = weight def __lowerCAmelCase ( self ) -> Graph: _UpperCAmelCase : Graph = Graph({min(self.vertices )} , {} ) _UpperCAmelCase : EdgeT _UpperCAmelCase : int _UpperCAmelCase : EdgeT _UpperCAmelCase : int while len(subgraph.vertices ) < len(self.vertices ): _UpperCAmelCase : Any = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: _UpperCAmelCase : Tuple = edge _UpperCAmelCase : Optional[int] = weight subgraph.add_edge(A , A ) return subgraph def lowerCamelCase_ (UpperCamelCase__ : str = "p107_network.txt" ): _UpperCAmelCase : str = os.path.abspath(os.path.dirname(UpperCamelCase__ ) ) _UpperCAmelCase : str = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) _UpperCAmelCase : dict[EdgeT, int] = {} _UpperCAmelCase : list[str] _UpperCAmelCase : int _UpperCAmelCase : int with open(UpperCamelCase__ ) as f: _UpperCAmelCase : str = f.read().strip().split('''\n''' ) _UpperCAmelCase : List[Any] = [line.split(''',''' ) for line in data] for edgea in range(1 , len(UpperCamelCase__ ) ): for edgea in range(UpperCamelCase__ ): if adjaceny_matrix[edgea][edgea] != "-": _UpperCAmelCase : Optional[Any] = int(adjaceny_matrix[edgea][edgea] ) _UpperCAmelCase : Graph = Graph(set(range(len(UpperCamelCase__ ) ) ) , UpperCamelCase__ ) _UpperCAmelCase : Graph = graph.prims_algorithm() _UpperCAmelCase : int = sum(graph.edges.values() ) _UpperCAmelCase : int = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f"{solution() = }")
263
0
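A tiny worked example of the Prim's-algorithm saving computed by the row above (graph and weights invented for illustration): total edge weight minus minimum-spanning-tree weight.

def mst_weight(vertices: set[int], edges: dict[tuple[int, int], int]) -> int:
    in_tree = {min(vertices)}
    total = 0
    while in_tree != vertices:
        # cheapest edge with exactly one endpoint inside the tree
        edge, weight = min(
            ((e, w) for e, w in edges.items() if (e[0] in in_tree) ^ (e[1] in in_tree)),
            key=lambda item: item[1],
        )
        in_tree.update(edge)
        total += weight
    return total


edges = {(0, 1): 16, (0, 2): 12, (1, 2): 10, (1, 3): 17, (2, 3): 4}
print(sum(edges.values()))              # 59, cost of keeping every edge
print(mst_weight({0, 1, 2, 3}, edges))  # 26, MST uses weights 12 + 10 + 4
print(sum(edges.values()) - mst_weight({0, 1, 2, 3}, edges))  # saving: 33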
import math


def solution(n: int = 100) -> int:
    # Difference between the square of the sum and the sum of the squares
    # of the first n natural numbers.
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
351
def find_min(arr: list[int]) -> int:
    # Minimum difference between the sums of a two-way partition of arr,
    # via a subset-sum DP table.
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i][j - 1]

            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
283
0
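A worked check of the two rows above for small inputs (values computed by hand): the sum-square difference for n = 10, and a brute-force confirmation of the best two-way partition of a short list.

n = 10
sum_of_squares = sum(i * i for i in range(1, n + 1))  # 385
square_of_sum = sum(range(1, n + 1)) ** 2             # 55^2 = 3025
print(square_of_sum - sum_of_squares)                 # 2640

arr = [1, 6, 11, 5]
# subsets {1, 5, 6} and {11} give sums 12 and 11, so the minimum difference is 1
best = min(
    abs(sum(arr) - 2 * sum(arr[i] for i in range(len(arr)) if mask >> i & 1))
    for mask in range(1 << len(arr))
)
print(best)  # 1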
'''simple docstring''' import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class lowercase ( unittest.TestCase ): """simple docstring""" def _snake_case ( self ) -> Optional[Any]: _UpperCAmelCase : str = """laion/clap-htsat-unfused""" _UpperCAmelCase : int = tempfile.mkdtemp() def _snake_case ( self ,**a_ ) -> str: return RobertaTokenizer.from_pretrained(self.checkpoint ,**a_ ) def _snake_case ( self ,**a_ ) -> Tuple: return ClapFeatureExtractor.from_pretrained(self.checkpoint ,**a_ ) def _snake_case ( self ) -> int: shutil.rmtree(self.tmpdirname ) def _snake_case ( self ) -> Optional[int]: _UpperCAmelCase : str = self.get_tokenizer() _UpperCAmelCase : Any = self.get_feature_extractor() _UpperCAmelCase : int = ClapProcessor(tokenizer=a_ ,feature_extractor=a_ ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase : List[Any] = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer ,a_ ) self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor ,a_ ) def _snake_case ( self ) -> List[Any]: _UpperCAmelCase : int = ClapProcessor(tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase : List[str] = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) _UpperCAmelCase : List[Any] = self.get_feature_extractor(do_normalize=a_ ,padding_value=1.0 ) _UpperCAmelCase : Optional[Any] = ClapProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=a_ ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,a_ ) self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor ,a_ ) def _snake_case ( self ) -> str: _UpperCAmelCase : Tuple = self.get_feature_extractor() _UpperCAmelCase : Dict = self.get_tokenizer() _UpperCAmelCase : str = ClapProcessor(tokenizer=a_ ,feature_extractor=a_ ) _UpperCAmelCase : Tuple = floats_list((3, 1_000) ) _UpperCAmelCase : int = feature_extractor(a_ ,return_tensors="""np""" ) _UpperCAmelCase : Union[str, Any] = processor(audios=a_ ,return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 ) def _snake_case ( self ) -> Tuple: _UpperCAmelCase : List[Any] = self.get_feature_extractor() _UpperCAmelCase : Any = self.get_tokenizer() _UpperCAmelCase : Optional[int] = ClapProcessor(tokenizer=a_ ,feature_extractor=a_ ) _UpperCAmelCase : Union[str, Any] = """This is a test string""" _UpperCAmelCase : Optional[Any] = processor(text=a_ ) _UpperCAmelCase : Any = tokenizer(a_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _snake_case ( self ) -> List[Any]: _UpperCAmelCase : str = self.get_feature_extractor() _UpperCAmelCase : List[str] = self.get_tokenizer() _UpperCAmelCase : Any = ClapProcessor(tokenizer=a_ ,feature_extractor=a_ ) 
_UpperCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCAmelCase : Dict = processor.batch_decode(a_ ) _UpperCAmelCase : Any = tokenizer.batch_decode(a_ ) self.assertListEqual(a_ ,a_ ) def _snake_case ( self ) -> Dict: _UpperCAmelCase : List[str] = self.get_feature_extractor() _UpperCAmelCase : int = self.get_tokenizer() _UpperCAmelCase : Dict = ClapProcessor(tokenizer=a_ ,feature_extractor=a_ ) self.assertListEqual( processor.model_input_names[2:] ,feature_extractor.model_input_names ,msg="""`processor` and `feature_extractor` model input names do not match""" ,)
215
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}


if TYPE_CHECKING:
    from .tokenization_bertweet import BertweetTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
215
1
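The bertweet __init__ above defers submodule imports via _LazyModule; a minimal sketch of the same idea using module-level __getattr__ (PEP 562) follows, with a made-up submodule name for illustration. Placed in a package __init__.py, the submodule is only imported on first attribute access.

import importlib

_import_structure = {"tokenization_example": ["ExampleTokenizer"]}
_attr_to_module = {
    attr: module for module, attrs in _import_structure.items() for attr in attrs
}


def __getattr__(name: str):
    # Import the owning submodule lazily, then hand back the attribute.
    if name in _attr_to_module:
        module = importlib.import_module("." + _attr_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")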
import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy UpperCamelCase__ = logging.getLogger(__name__) UpperCamelCase__ = """pytorch_model.bin""" @dataclasses.dataclass class lowerCamelCase_ : SCREAMING_SNAKE_CASE_ = dataclasses.field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=a_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , ) @dataclasses.dataclass class lowerCamelCase_ : SCREAMING_SNAKE_CASE_ = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} ) SCREAMING_SNAKE_CASE_ = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=a_ , metadata={'help': 'A csv or a json file containing the validation data.'} ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=a_ , metadata={'help': 'The name of the task to train on.'} , ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=a_ , metadata={'help': 'The list of labels for the task.'} ) @dataclasses.dataclass class lowerCamelCase_ : SCREAMING_SNAKE_CASE_ = dataclasses.field( metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default='no' , metadata={ 'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]' } , ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=10 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=0.0 , metadata={ 'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.' 
} , ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=a_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=a_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=a_ , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=1_00 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=a_ , metadata={'help': 'Random seed for initialization.'} , ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Dict: """simple docstring""" a = datasets.concatenate_datasets([infer_input, infer_output], axis=1 ) if args.do_filter_by_confidence: a = dataset.filter(lambda snake_case_ : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 a = int(eval_result * len(snake_case_ ) ) print(snake_case_ ) a = dataset.sort('''probability''', reverse=snake_case_ ) a = dataset.select(range(snake_case_ ) ) a = dataset.remove_columns(['''label''', '''probability'''] ) a = dataset.rename_column('''prediction''', '''label''' ) a = dataset.map(lambda snake_case_ : {"label": idalabel[example["label"]]} ) a = dataset.shuffle(seed=args.seed ) a = os.path.join(snake_case_, f"""train_pseudo.{args.data_file_extension}""" ) if args.data_file_extension == "csv": dataset.to_csv(snake_case_, index=snake_case_ ) else: dataset.to_json(snake_case_ ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, **snake_case_ ) -> Optional[Any]: """simple docstring""" a = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() a = STModelArguments(model_name_or_path=snake_case_ ) a = STDataArguments(train_file=snake_case_, infer_file=snake_case_ ) a = STTrainingArguments(output_dir=snake_case_ ) a = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(snake_case_ ).items(): setattr(snake_case_, snake_case_, snake_case_ ) for key, value in kwargs.items(): if hasattr(snake_case_, snake_case_ ): setattr(snake_case_, snake_case_, snake_case_ ) # Sanity checks a = {} a = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None a = args.train_file a = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None a = args.eval_file for key in data_files: a = data_files[key].split('''.''' )[-1] assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file.""" if args.data_file_extension is None: a = extension else: assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`.""" assert ( args.eval_metric in datasets.list_metrics() ), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.""" # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed ) logger.info('''Creating the initial data directory for self-training...''' ) a = f"""{args.output_dir}/self-train_iter-{{}}""".format a = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=snake_case_ ) os.makedirs(snake_case_, exist_ok=snake_case_ ) accelerator.wait_for_everyone() a = None a = None a = 0 a = False # Show the progress bar a = tqdm(range(args.max_selftrain_iterations ), disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0, int(args.max_selftrain_iterations ) ): a = data_dir_format(snake_case_ ) assert os.path.exists(snake_case_ ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 a = os.path.join(snake_case_, '''stage-1''' ) a = { '''accelerator''': accelerator, '''model_name_or_path''': args.model_name_or_path, '''cache_dir''': args.cache_dir, '''do_train''': True, '''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''], '''do_eval''': True if args.eval_file is not None else False, '''eval_file''': data_files['''eval'''], '''do_predict''': True, '''infer_file''': data_files['''infer'''], '''task_name''': args.task_name, '''label_list''': args.label_list, '''output_dir''': current_output_dir, '''eval_metric''': args.eval_metric, '''evaluation_strategy''': args.evaluation_strategy, '''early_stopping_patience''': args.early_stopping_patience, '''early_stopping_threshold''': args.early_stopping_threshold, '''seed''': args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(snake_case_, snake_case_ ): arguments_dict.update({key: value} ) a = os.path.join(snake_case_, '''best-checkpoint''', snake_case_ ) if os.path.exists(snake_case_ ): logger.info( '''Found existing model checkpoint at %s. 
Skipping self-training: iteration: %d, stage: 1.''', snake_case_, snake_case_, ) else: logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''', snake_case_ ) finetune(**snake_case_ ) accelerator.wait_for_everyone() assert os.path.exists(snake_case_ ) logger.info('''Self-training job completed: iteration: %d, stage: 1.''', snake_case_ ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data a = os.path.join(snake_case_, '''best-checkpoint''' ) a = os.path.join(snake_case_, '''stage-2''' ) # Update arguments_dict a = model_path a = data_files['''train'''] a = current_output_dir a = os.path.join(snake_case_, '''best-checkpoint''', snake_case_ ) if os.path.exists(snake_case_ ): logger.info( '''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''', snake_case_, snake_case_, ) else: logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''', snake_case_ ) finetune(**snake_case_ ) accelerator.wait_for_everyone() assert os.path.exists(snake_case_ ) logger.info('''Self-training job completed: iteration: %d, stage: 2.''', snake_case_ ) a = iteration a = data_dir_format(iteration + 1 ) a = AutoConfig.from_pretrained(os.path.join(snake_case_, '''best-checkpoint''' ) ) a = config.idalabel a = os.path.join(snake_case_, '''eval_results_best-checkpoint.json''' ) a = os.path.join(snake_case_, '''test_results_best-checkpoint.json''' ) assert os.path.exists(snake_case_ ) with open(snake_case_, '''r''' ) as f: a = float(json.load(snake_case_ )[args.eval_metric] ) a = os.path.join(snake_case_, '''infer_output_best-checkpoint.csv''' ) assert os.path.exists(snake_case_ ) # Loading the dataset from local csv or json files. a = load_dataset(args.data_file_extension, data_files={'''data''': data_files['''infer''']} )['''data'''] a = load_dataset('''csv''', data_files={'''data''': infer_output_file} )['''data'''] if accelerator.is_main_process: os.makedirs(snake_case_, exist_ok=snake_case_ ) shutil.copy(snake_case_, os.path.join(snake_case_, f"""eval_results_iter-{iteration}.json""" ) ) if os.path.exists(snake_case_ ): shutil.copy(snake_case_, os.path.join(snake_case_, f"""test_results_iter-{iteration}.json""" ) ) create_pseudo_labeled_data(snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) accelerator.wait_for_everyone() a = os.path.join(snake_case_, f"""train_pseudo.{args.data_file_extension}""" ) if args.evaluation_strategy != IntervalStrategy.NO.value: a = eval_result if best_iteration is None: a = new_iteration a = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: a = new_iteration a = new_eval_result a = 0 else: if new_eval_result == best_eval_result: a = new_iteration a = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: a = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info('''Best iteration: %d''', snake_case_ ) logger.info('''Best evaluation result: %s = %f''', args.eval_metric, snake_case_ ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(snake_case_, f"""eval_results_iter-{iteration}.json""" ), os.path.join(snake_case_, '''eval_results_best-iteration.json''' ), ) else: # Assume that the last iteration is the best logger.info('''Best iteration: %d''', args.max_selftrain_iterations - 1 ) logger.info('''Best 
evaluation result: %s = %f''', args.eval_metric, snake_case_ ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(snake_case_, f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ), os.path.join(snake_case_, '''eval_results_best-iteration.json''' ), )
370
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
330
0
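The self-training loop above filters pseudo-labels either by a fixed confidence threshold or by keeping the top fraction ranked by confidence; a dependency-free sketch of both selection rules follows (record layout and numbers invented for illustration).

predictions = [
    {"text": "a", "prediction": 1, "probability": 0.97},
    {"text": "b", "prediction": 0, "probability": 0.55},
    {"text": "c", "prediction": 1, "probability": 0.80},
    {"text": "d", "prediction": 0, "probability": 0.91},
]

# rule 1: keep everything above a fixed confidence threshold
threshold = 0.9
by_threshold = [p for p in predictions if p["probability"] > threshold]

# rule 2: keep the top eval_result fraction, sorted by confidence
eval_result = 0.5
ranked = sorted(predictions, key=lambda p: p["probability"], reverse=True)
by_rank = ranked[: int(eval_result * len(ranked))]

print([p["text"] for p in by_threshold])  # ['a', 'd']
print([p["text"] for p in by_rank])       # ['a', 'd']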
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __A : str = logging.get_logger(__name__) __A : Union[str, Any] = { '''microsoft/table-transformer-detection''': ( '''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json''' ), } class _UpperCAmelCase ( _A ): SCREAMING_SNAKE_CASE_ : int = "table-transformer" SCREAMING_SNAKE_CASE_ : Optional[int] = ["past_key_values"] SCREAMING_SNAKE_CASE_ : Optional[int] = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self : Tuple , A : Dict=True , A : List[str]=None , A : str=3 , A : int=1_00 , A : Optional[int]=6 , A : Union[str, Any]=20_48 , A : Union[str, Any]=8 , A : Optional[Any]=6 , A : Tuple=20_48 , A : List[Any]=8 , A : str=0.0 , A : Optional[int]=0.0 , A : str=True , A : Optional[int]="relu" , A : List[Any]=2_56 , A : Union[str, Any]=0.1 , A : Tuple=0.0 , A : int=0.0 , A : Any=0.02 , A : Optional[Any]=1.0 , A : List[Any]=False , A : List[Any]="sine" , A : Tuple="resnet50" , A : Optional[Any]=True , A : int=False , A : List[Any]=1 , A : Optional[Any]=5 , A : Union[str, Any]=2 , A : Optional[int]=1 , A : Tuple=1 , A : List[str]=5 , A : Optional[Any]=2 , A : str=0.1 , **A : int , ) -> Dict: if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowercase_ : Dict = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(A , A ): lowercase_ : Union[str, Any] = backbone_config.get('''model_type''' ) lowercase_ : Any = CONFIG_MAPPING[backbone_model_type] lowercase_ : List[str] = config_class.from_dict(A ) # set timm attributes to None lowercase_ , lowercase_ , lowercase_ : Dict = None, None, None lowercase_ : Union[str, Any] = use_timm_backbone lowercase_ : Tuple = backbone_config lowercase_ : Dict = num_channels lowercase_ : List[str] = num_queries lowercase_ : Any = d_model lowercase_ : Optional[Any] = encoder_ffn_dim lowercase_ : Any = encoder_layers lowercase_ : Dict = encoder_attention_heads lowercase_ : str = decoder_ffn_dim lowercase_ : str = decoder_layers lowercase_ : int = decoder_attention_heads lowercase_ : Optional[int] = dropout lowercase_ : Optional[Any] = attention_dropout lowercase_ : str = activation_dropout lowercase_ : Tuple = activation_function lowercase_ : Any = init_std lowercase_ : str = init_xavier_std lowercase_ : Union[str, Any] = encoder_layerdrop lowercase_ : Optional[Any] = decoder_layerdrop lowercase_ : Optional[Any] = encoder_layers lowercase_ : Tuple = auxiliary_loss lowercase_ : Dict = position_embedding_type lowercase_ : Optional[Any] = backbone lowercase_ : List[str] = use_pretrained_backbone lowercase_ : Any = dilation # Hungarian matcher lowercase_ : Any = class_cost lowercase_ : Optional[Any] = bbox_cost lowercase_ : int = giou_cost # Loss coefficients lowercase_ : Union[str, Any] = mask_loss_coefficient lowercase_ : Optional[int] = dice_loss_coefficient lowercase_ : Optional[Any] = bbox_loss_coefficient lowercase_ : Optional[int] = giou_loss_coefficient lowercase_ : Union[str, Any] = eos_coefficient super().__init__(is_encoder_decoder=A , **A ) @property def A 
( self : Any ) -> int: return self.encoder_attention_heads @property def A ( self : List[Any] ) -> int: return self.d_model class _UpperCAmelCase ( _A ): SCREAMING_SNAKE_CASE_ : Optional[Any] = version.parse("1.11" ) @property def A ( self : Dict ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def A ( self : List[str] ) -> float: return 1e-5 @property def A ( self : int ) -> int: return 12
33
import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _UpperCAmelCase : List[Any] = logging.get_logger(__name__) _UpperCAmelCase : Dict = {"vocab_file": "spiece.model"} _UpperCAmelCase : Dict = { "vocab_file": { "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model", "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model" ), } } _UpperCAmelCase : int = { "google/bigbird-roberta-base": 4_096, "google/bigbird-roberta-large": 4_096, "google/bigbird-base-trivia-itc": 4_096, } class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : List[str] = VOCAB_FILES_NAMES __lowercase : int = PRETRAINED_VOCAB_FILES_MAP __lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : List[Any] = ["input_ids", "attention_mask"] __lowercase : List[int] = [] def __init__( self , A_ , A_="<unk>" , A_="<s>" , A_="</s>" , A_="<pad>" , A_="[SEP]" , A_="[MASK]" , A_="[CLS]" , A_ = None , **A_ , ) -> None: """simple docstring""" UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ , eos_token=A_ , unk_token=A_ , pad_token=A_ , sep_token=A_ , mask_token=A_ , cls_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , ) UpperCamelCase = vocab_file UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A_ ) @property def __UpperCamelCase ( self ) -> Dict: """simple docstring""" return self.sp_model.get_piece_size() def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = self.__dict__.copy() UpperCamelCase = None return state def __setstate__( self , A_ ) -> Optional[int]: """simple docstring""" UpperCamelCase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): UpperCamelCase = {} UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __UpperCamelCase ( self , A_ ) -> List[str]: """simple docstring""" return self.sp_model.encode(A_ , out_type=A_ ) def __UpperCamelCase ( self , A_ ) -> Optional[Any]: """simple docstring""" return self.sp_model.piece_to_id(A_ ) def __UpperCamelCase ( self , A_ ) -> Tuple: """simple docstring""" UpperCamelCase = self.sp_model.IdToPiece(A_ ) return token def __UpperCamelCase ( self , A_ ) -> List[Any]: """simple docstring""" UpperCamelCase = [] UpperCamelCase = '' UpperCamelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A_ ) + token UpperCamelCase = True UpperCamelCase = [] else: current_sub_tokens.append(A_ ) UpperCamelCase = False out_string += self.sp_model.decode(A_ ) return out_string.strip() def __UpperCamelCase ( self , A_ , A_ = False , A_ = None , A_ = True , **A_ , ) -> str: """simple docstring""" UpperCamelCase = kwargs.pop('use_source_tokenizer' , A_ ) UpperCamelCase = self.convert_ids_to_tokens(A_ , skip_special_tokens=A_ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 UpperCamelCase = [] UpperCamelCase = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(A_ ) ) UpperCamelCase = [] sub_texts.append(A_ ) else: current_sub_text.append(A_ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(A_ ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: UpperCamelCase = re.sub(r' (\[(MASK|SEP)\])' , r'\1' , ' '.join(A_ ) ) else: UpperCamelCase = ''.join(A_ ) UpperCamelCase = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: UpperCamelCase = self.clean_up_tokenization(A_ ) return clean_text else: return text def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(A_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCamelCase = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ , 'wb' ) as fi: UpperCamelCase = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,) def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCamelCase = [self.cls_token_id] UpperCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def __UpperCamelCase ( self , A_ , A_ = None , A_ = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ ) if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1] def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]: """simple docstring""" UpperCamelCase = [self.sep_token_id] UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
"""simple docstring""" import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def _a ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , ): """simple docstring""" if config_name_or_path is None: UpperCAmelCase = """facebook/rag-token-base""" if model_type == """rag_token""" else """facebook/rag-sequence-base""" if generator_tokenizer_name_or_path is None: UpperCAmelCase = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: UpperCAmelCase = question_encoder_name_or_path UpperCAmelCase = RagTokenForGeneration if model_type == """rag_token""" else RagSequenceForGeneration # Save model. UpperCAmelCase = RagConfig.from_pretrained(_snake_case ) UpperCAmelCase = AutoConfig.from_pretrained(_snake_case ) UpperCAmelCase = AutoConfig.from_pretrained(_snake_case ) UpperCAmelCase = gen_config UpperCAmelCase = question_encoder_config UpperCAmelCase = model_class.from_pretrained_question_encoder_generator( _snake_case , _snake_case , config=_snake_case ) rag_model.save_pretrained(_snake_case ) # Sanity check. model_class.from_pretrained(_snake_case ) # Save tokenizers. UpperCAmelCase = AutoTokenizer.from_pretrained(_snake_case ) gen_tokenizer.save_pretrained(dest_dir / """generator_tokenizer/""" ) UpperCAmelCase = AutoTokenizer.from_pretrained(_snake_case ) question_encoder_tokenizer.save_pretrained(dest_dir / """question_encoder_tokenizer/""" ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() parser.add_argument( """--model_type""", choices=["""rag_sequence""", """rag_token"""], required=True, type=str, help="""RAG model type: rag_sequence, rag_token""", ) parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""") parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""") parser.add_argument( """--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier""" ) parser.add_argument( """--generator_tokenizer_name_or_path""", type=str, help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""", ) parser.add_argument( """--question_encoder_tokenizer_name_or_path""", type=str, help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""", ) parser.add_argument( """--config_name_or_path""", type=str, help=( """Identifier of the model config to use, if not provided, resolves to a base config for a given""" """ ``model_type``""" ), ) _UpperCamelCase = parser.parse_args() _UpperCamelCase = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
"""simple docstring""" import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu _UpperCamelCase = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json""" with io.open(filename, """r""", encoding="""utf-8""") as f: _UpperCamelCase = json.load(f) @require_torch class lowerCamelCase__ ( unittest.TestCase ): def _UpperCamelCase ( self ,A ): return FSMTTokenizer.from_pretrained(A ) def _UpperCamelCase ( self ,A ): UpperCAmelCase = FSMTForConditionalGeneration.from_pretrained(A ).to(A ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ["""en-ru""", 26.0], ["""ru-en""", 22.0], ["""en-de""", 22.0], ["""de-en""", 29.0], ] ) @slow def _UpperCamelCase ( self ,A ,A ): # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality UpperCAmelCase = F'''facebook/wmt19-{pair}''' UpperCAmelCase = self.get_tokenizer(A ) UpperCAmelCase = self.get_model(A ) UpperCAmelCase = bleu_data[pair]["""src"""] UpperCAmelCase = bleu_data[pair]["""tgt"""] UpperCAmelCase = tokenizer(A ,return_tensors="""pt""" ,truncation=A ,padding="""longest""" ).to(A ) UpperCAmelCase = model.generate( input_ids=batch.input_ids ,num_beams=8 ,) UpperCAmelCase = tokenizer.batch_decode( A ,skip_special_tokens=A ,clean_up_tokenization_spaces=A ) UpperCAmelCase = calculate_bleu(A ,A ) print(A ) self.assertGreaterEqual(scores["""bleu"""] ,A )
from scipy.stats import spearmanr import datasets _lowercase: Union[str, Any] = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n" _lowercase: List[Any] = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n" _lowercase: Dict = r"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class _lowercase ( datasets.Metric ): """simple docstring""" def UpperCamelCase_ (self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , ) def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False ): """simple docstring""" a = spearmanr(lowerCamelCase_ , lowerCamelCase_ ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
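# Editorial demo of the property the description above relies on: Spearman's
# rho is the Pearson correlation of the ranks, so it is invariant under any
# strictly monotone transform of either variable (the cube below, for
# instance). The result is indexed as results[0], mirroring _compute above.
from scipy.stats import spearmanr

x = [1, 2, 3, 4, 5]
rho = spearmanr(x, [v**3 for v in x])[0]
assert abs(rho - 1.0) < 1e-12  # a perfect monotone relationship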
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    """Lomuto-style partition: move elements smaller than the pivot (the
    left-most element) before it and return the pivot's final index."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    """Sort a[left:right] in place, picking a random pivot to avoid the
    quadratic worst case on already-sorted input."""
    if left < right:
        pivot = random.randint(left, right - 1)
        # switch the pivot with the left-most bound
        a[left], a[pivot] = a[pivot], a[left]
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
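# A deterministic check of the module above (editorial addition; the
# interactive main() is hard to exercise in tests). quick_sort_random sorts
# in place over the half-open index range [left, right):
sample = [5, 3, 8, 1, 9, 2]
quick_sort_random(sample, 0, len(sample))
assert sample == [1, 2, 3, 5, 8, 9]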
from sklearn.metrics import mean_squared_error import datasets lowerCAmelCase : Tuple = """\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ lowerCAmelCase : List[str] = """\ Mean Squared Error(MSE) is the average of the square of difference between the predicted and actual values. """ lowerCAmelCase : Optional[Any] = """ Args: predictions: array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. references: array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. sample_weight: array-like of shape (n_samples,), default=None Sample weights. multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\" Defines aggregating of multiple output values. Array-like value defines weights used to average errors. \"raw_values\" : Returns a full set of errors in case of multioutput input. \"uniform_average\" : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value. Returns: mse : mean squared error. Examples: >>> mse_metric = datasets.load_metric(\"mse\") >>> predictions = [2.5, 0.0, 2, 8] >>> references = [3, -0.5, 2, 7] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {'mse': 0.375} >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False) >>> print(rmse_result) {'mse': 0.6123724356957945} If you're using multi-dimensional lists, then set the config as follows : >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\") >>> predictions = [[0.5, 1], [-1, 1], [7, -6]] >>> references = [[0, 2], [-1, 2], [8, -5]] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {'mse': 0.7083333333333334} >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values') >>> print(results) # doctest: +NORMALIZE_WHITESPACE {'mse': array([0.41666667, 1. 
])} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowercase ( datasets.Metric ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : Tuple): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types()) , reference_urls=[ "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html" ] , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value("float")), "references": datasets.Sequence(datasets.Value("float")), } else: return { "predictions": datasets.Value("float"), "references": datasets.Value("float"), } def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : Dict="uniform_average" , lowerCAmelCase__ : Any=True): SCREAMING_SNAKE_CASE_: Any = mean_squared_error( lowerCAmelCase__ , lowerCAmelCase__ , sample_weight=lowerCAmelCase__ , multioutput=lowerCAmelCase__ , squared=lowerCAmelCase__) return {"mse": mse}
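# Editorial demo of the squared/RMSE relationship documented above: with
# squared=False, scikit-learn returns the root of the mean squared error.
# The values reuse the example from the docstring (mse = 0.375).
from math import sqrt

from sklearn.metrics import mean_squared_error

preds, refs = [2.5, 0.0, 2, 8], [3, -0.5, 2, 7]
rmse = mean_squared_error(refs, preds, squared=False)
assert abs(sqrt(mean_squared_error(refs, preds)) - rmse) < 1e-12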
import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase : Any = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right lowerCAmelCase : Optional[Any] = 50003 lowerCAmelCase : List[str] = 50002 @require_sentencepiece @require_tokenizers class __lowercase ( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" _UpperCAmelCase : int = PLBartTokenizer _UpperCAmelCase : Any = None _UpperCAmelCase : Optional[Any] = False def _SCREAMING_SNAKE_CASE ( self : str): super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE_: Tuple = PLBartTokenizer(lowerCAmelCase__ , language_codes="base" , keep_accents=lowerCAmelCase__) tokenizer.save_pretrained(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : str): SCREAMING_SNAKE_CASE_: Optional[int] = PLBartTokenizer(lowerCAmelCase__ , language_codes="base" , keep_accents=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Dict = tokenizer.tokenize("This is a test") self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) SCREAMING_SNAKE_CASE_: str = tokenizer.convert_tokens_to_ids(lowerCAmelCase__) self.assertListEqual( lowerCAmelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) SCREAMING_SNAKE_CASE_: Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) SCREAMING_SNAKE_CASE_: int = tokenizer.vocab_size SCREAMING_SNAKE_CASE_: Tuple = [tokenizer.convert_ids_to_tokens(lowerCAmelCase__) for x in range(end - 4 , lowerCAmelCase__)] self.assertListEqual(lowerCAmelCase__ , ["__java__", "__python__", "__en_XX__", "<mask>"]) SCREAMING_SNAKE_CASE_: Optional[int] = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go" SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer(lowerCAmelCase__).input_ids self.assertEqual( tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__) , lowerCAmelCase__ , ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): SCREAMING_SNAKE_CASE_: Union[str, Any] = PLBartTokenizer(lowerCAmelCase__ , language_codes="multi" , keep_accents=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = tokenizer.tokenize("This is a test") 
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) SCREAMING_SNAKE_CASE_: Tuple = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__) self.assertListEqual( lowerCAmelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer.vocab_size SCREAMING_SNAKE_CASE_: int = [tokenizer.convert_ids_to_tokens(lowerCAmelCase__) for x in range(end - 7 , lowerCAmelCase__)] self.assertListEqual( lowerCAmelCase__ , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]) SCREAMING_SNAKE_CASE_: str = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go" SCREAMING_SNAKE_CASE_: Tuple = tokenizer(lowerCAmelCase__).input_ids self.assertEqual( tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__) , lowerCAmelCase__ , ) @require_torch @require_sentencepiece @require_tokenizers class __lowercase ( unittest.TestCase ): """simple docstring""" _UpperCAmelCase : Optional[Any] = '''uclanlp/plbart-python-en_XX''' _UpperCAmelCase : List[str] = [ '''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''', '''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''', ] _UpperCAmelCase : int = [ '''Returns the maximum value of a b c.''', '''Sums the values of a b c.''', ] _UpperCAmelCase : Optional[Any] = [ 134, 5452, 3_3460, 3_3441, 3_3463, 3_3465, 3_3463, 3_3449, 988, 20, 3_3456, 19, 3_3456, 771, 39, 4258, 889, 3318, 3_3441, 3_3463, 3_3465, 3_3463, 3_3449, 2471, 2, PYTHON_CODE, ] @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict): SCREAMING_SNAKE_CASE_: PLBartTokenizer = PLBartTokenizer.from_pretrained( cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX") SCREAMING_SNAKE_CASE_: Optional[Any] = 1 return cls def _SCREAMING_SNAKE_CASE ( self : List[str]): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0001) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0002) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0003) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): SCREAMING_SNAKE_CASE_: Dict = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Dict): self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids) SCREAMING_SNAKE_CASE_: 
Optional[Any] = [EN_CODE, 9037, 3_3442, 57, 752, 153, 14, 56, 18, 9, 2] SCREAMING_SNAKE_CASE_: int = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__) self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : str): SCREAMING_SNAKE_CASE_: List[str] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20] self.assertIsInstance(src_text[0] , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = 10 SCREAMING_SNAKE_CASE_: Optional[Any] = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__).input_ids[0] self.assertEqual(ids[-2] , 2) self.assertEqual(ids[-1] , lowerCAmelCase__) self.assertEqual(len(lowerCAmelCase__) , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Tuple): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]) , [5_0004, 5_0001]) def _SCREAMING_SNAKE_CASE ( self : str): SCREAMING_SNAKE_CASE_: List[Any] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_: Optional[int] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = PLBartTokenizer.from_pretrained(lowerCAmelCase__) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__) @require_torch def _SCREAMING_SNAKE_CASE ( self : int): SCREAMING_SNAKE_CASE_: List[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt") SCREAMING_SNAKE_CASE_: Any = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE]) self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase__) self.assertEqual(batch.decoder_input_ids[1][-1] , 2) self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE]) @require_torch def _SCREAMING_SNAKE_CASE ( self : str): SCREAMING_SNAKE_CASE_: Optional[int] = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=len(self.expected_src_tokens) , return_tensors="pt" , ) SCREAMING_SNAKE_CASE_: Union[str, Any] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__) self.assertEqual((2, 26) , batch.input_ids.shape) self.assertEqual((2, 26) , batch.attention_mask.shape) SCREAMING_SNAKE_CASE_: Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__) self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , []) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE]) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): SCREAMING_SNAKE_CASE_: List[Any] = self.tokenizer(self.src_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=3 , return_tensors="pt") SCREAMING_SNAKE_CASE_: Any = self.tokenizer( text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=10 , return_tensors="pt") SCREAMING_SNAKE_CASE_: List[Any] = targets["input_ids"] SCREAMING_SNAKE_CASE_: List[Any] = shift_tokens_right(lowerCAmelCase__ , self.tokenizer.pad_token_id) 
self.assertEqual(batch.input_ids.shape[1] , 3) self.assertEqual(batch.decoder_input_ids.shape[1] , 10) @require_torch def _SCREAMING_SNAKE_CASE ( self : int): SCREAMING_SNAKE_CASE_: List[str] = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java") self.assertEqual( nested_simplify(lowerCAmelCase__) , { # A, test, EOS, en_XX "input_ids": [[150, 242, 2, 5_0003]], "attention_mask": [[1, 1, 1, 1]], # java "forced_bos_token_id": 5_0001, } , )
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Manhattan distance from this node to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        """Expand the node with the smallest heuristic cost until the goal
        is reached; returns the path, or [start] if no path exists."""
        while self.open_nodes:
            # Open nodes are sorted using __lt__ (smallest f_cost first)
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the in-bounds, obstacle-free neighbours of `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Walk parent pointers back to the start and reverse the result."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem)
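# Editorial note: sorting the whole open list on every expansion costs
# O(n log n) each time. The idiomatic frontier is a binary heap, which reuses
# Node's existing __lt__ ordering (a sketch, not part of the original file;
# open_heap is a placeholder name):
#
#     import heapq
#     heapq.heappush(open_heap, node)          # O(log n) insert
#     current_node = heapq.heappop(open_heap)  # O(log n) pop of the best f_cost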
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        node: Node | None = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True if following next_node pointers ever revisits a node."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False

    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node.next_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node.next_node = Node(6)
    root_node.next_node.next_node.next_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
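# The visited-list check above is O(n^2) time and O(n) space. Floyd's
# tortoise-and-hare algorithm detects a loop in O(n) time and O(1) space;
# the helper below is an editorial sketch against the same Node interface:
def has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # advances one step
        fast = fast.next_node.next_node  # advances two steps
        if slow is fast:                 # the pointers can only meet inside a cycle
            return True
    return False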
def bfs(graph, s, t, parent):
    """Breadth-first search over the residual graph. Returns True if the
    sink t is reachable from s, filling parent[] with the path found."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS to store the augmenting path
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow

        # Update the residual capacities along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
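# Editorial notes: because augmenting paths are found with BFS, this is the
# Edmonds-Karp variant of Ford-Fulkerson, running in O(V * E^2). The capacity
# matrix above is the classic CLRS example network, whose maximum flow is 23.
# ford_fulkerson mutates its argument into the residual graph, so pass a copy
# when the original capacities are still needed:
import copy

capacities = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(copy.deepcopy(capacities), 0, 5) == 23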
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class __lowercase (__SCREAMING_SNAKE_CASE ): """simple docstring""" def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=9_9 , lowerCAmelCase__=3_2 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=3_7 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=1_6 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent SCREAMING_SNAKE_CASE_ : Any = batch_size SCREAMING_SNAKE_CASE_ : Optional[int] = seq_length SCREAMING_SNAKE_CASE_ : Any = is_training SCREAMING_SNAKE_CASE_ : int = use_input_mask SCREAMING_SNAKE_CASE_ : Union[str, Any] = use_token_type_ids SCREAMING_SNAKE_CASE_ : Dict = use_labels SCREAMING_SNAKE_CASE_ : List[str] = vocab_size SCREAMING_SNAKE_CASE_ : Dict = hidden_size SCREAMING_SNAKE_CASE_ : Dict = num_hidden_layers SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads SCREAMING_SNAKE_CASE_ : Dict = intermediate_size SCREAMING_SNAKE_CASE_ : Any = hidden_act SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : List[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : List[str] = max_position_embeddings SCREAMING_SNAKE_CASE_ : Any = type_vocab_size SCREAMING_SNAKE_CASE_ : List[Any] = type_sequence_label_size SCREAMING_SNAKE_CASE_ : Tuple = initializer_range SCREAMING_SNAKE_CASE_ : int = num_labels SCREAMING_SNAKE_CASE_ : List[str] = num_choices SCREAMING_SNAKE_CASE_ : Tuple = scope def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE_ : Optional[int] = None if self.use_input_mask: SCREAMING_SNAKE_CASE_ : str = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE_ : str = None SCREAMING_SNAKE_CASE_ : Optional[int] = None SCREAMING_SNAKE_CASE_ : Any = None if self.use_labels: SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE_ : Any = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self ): """simple docstring""" return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , 
attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = DistilBertModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : int = model(lowerCAmelCase__ , lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : int = model(lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = DistilBertForMaskedLM(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : Any = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = DistilBertForQuestionAnswering(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : str = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels SCREAMING_SNAKE_CASE_ : Optional[int] = DistilBertForSequenceClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : str = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels SCREAMING_SNAKE_CASE_ : Any = DistilBertForTokenClassification(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : List[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = self.num_choices SCREAMING_SNAKE_CASE_ : Union[str, Any] = DistilBertForMultipleChoice(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE_ : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE_ : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE_ : 
Any = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = self.prepare_config_and_inputs() ((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) : Tuple = config_and_inputs SCREAMING_SNAKE_CASE_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __lowercase (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" _UpperCAmelCase = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) _UpperCAmelCase = ( { """feature-extraction""": DistilBertModel, """fill-mask""": DistilBertForMaskedLM, """question-answering""": DistilBertForQuestionAnswering, """text-classification""": DistilBertForSequenceClassification, """token-classification""": DistilBertForTokenClassification, """zero-shot""": DistilBertForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = True def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = DistilBertModelTester(self ) SCREAMING_SNAKE_CASE_ : str = ConfigTester(self , config_class=lowerCAmelCase__ , dim=3_7 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase__ ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ : Any = DistilBertModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) @slow @require_torch_gpu def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice 
behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return SCREAMING_SNAKE_CASE_ : Tuple = True SCREAMING_SNAKE_CASE_ : str = model_class(config=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[str] = torch.jit.trace( lowerCAmelCase__ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , 'traced_model.pt' ) ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.jit.load(os.path.join(lowerCAmelCase__ , 'traced_model.pt' ) , map_location=lowerCAmelCase__ ) loaded(inputs_dict['input_ids'].to(lowerCAmelCase__ ) , inputs_dict['attention_mask'].to(lowerCAmelCase__ ) ) @require_torch class __lowercase (unittest.TestCase ): """simple docstring""" @slow def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = DistilBertModel.from_pretrained('distilbert-base-uncased' ) SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Any = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0] SCREAMING_SNAKE_CASE_ : int = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor( [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase__ , atol=1E-4 ) )
def min_path_sum(grid: list) -> int:
    """Return the minimum cost of a path from the top-left to the
    bottom-right cell of the grid, moving only right or down. The grid is
    overwritten in place with running prefix costs."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    """Accumulate the cheapest reachable cost into each cell of the row."""
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
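# Worked example (editorial addition): for the classic 3x3 grid below, the
# cheapest monotone path is 1 -> 3 -> 1 -> 1 -> 1, with total cost 7.
assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7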
import socket


def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
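# A matching file-sending server (an editorial sketch, not part of the
# original snippet; "payload.bin" is a placeholder file name). It serves the
# first client that connects on the same port, then closes the connection,
# which makes the client's recv() loop above terminate on the empty read:
import socket


def serve_file(filename: str = "payload.bin", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)  # accept a single client
    conn, _addr = server.accept()
    conn.recv(1024)   # consume the client's greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()
    server.close()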
'''simple docstring''' from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge lowercase : Tuple = [ "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the" " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe" " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.", "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal" " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's" " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the" " body.", "Amnesty International releases its annual report on the death penalty. The report catalogs the use of" " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the" " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital" " punishment.", ] lowercase : Dict = [ "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ." " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz" " had informed his Lufthansa training school of an episode of severe depression, airline says .", "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ." " Israel and the United States opposed the move, which could open the door to war crimes investigations against" " Israelis .", "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to" " death . Organization claims that governments around the world are using the threat of terrorism to advance" " executions . 
The number of executions worldwide has gone down by almost 22% compared with 2013, but death" " sentences up by 28% .", ] def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]: _snake_case = calculate_rouge(__A , __A , bootstrap_aggregation=__A , rouge_keys=['rouge2', 'rougeL'] ) assert isinstance(__A , __A ) _snake_case = calculate_rouge(__A , __A , bootstrap_aggregation=__A , rouge_keys=['rouge2'] ) assert ( pd.DataFrame(no_aggregation['rouge2'] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra['rouge2'] ).fmeasure.mean() ) def SCREAMING_SNAKE_CASE__ ( ) -> Dict: _snake_case = 'rougeLsum' _snake_case = calculate_rouge(__A , __A , newline_sep=__A , rouge_keys=[k] )[k] _snake_case = calculate_rouge(__A , __A , newline_sep=__A , rouge_keys=[k] )[k] assert score > score_no_sep def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]: _snake_case = ['rouge1', 'rouge2', 'rougeL'] _snake_case = calculate_rouge(__A , __A , newline_sep=__A , rouge_keys=__A ) _snake_case = calculate_rouge(__A , __A , newline_sep=__A , rouge_keys=__A ) assert score_sep == score_no_sep def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]: _snake_case = [ 'Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.', 'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .', ] _snake_case = [ 'Margot Frank, died in 1945, a month earlier than previously thought.', 'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of' ' the final seconds on board Flight 9525.', ] assert calculate_rouge(__A , __A , newline_sep=__A ) == calculate_rouge(__A , __A , newline_sep=__A ) def SCREAMING_SNAKE_CASE__ ( ) -> Any: _snake_case = [ '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" ' ] _snake_case = [ ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .' ] _snake_case = calculate_rouge(__A , __A , rouge_keys=['rougeLsum'] , newline_sep=__A )['rougeLsum'] _snake_case = calculate_rouge(__A , __A , rouge_keys=['rougeLsum'] )['rougeLsum'] assert new_score > prev_score def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]: _snake_case = Path('examples/seq2seq/test_data/wmt_en_ro' ) _snake_case = calculate_rouge_path(data_dir.joinpath('test.source' ) , data_dir.joinpath('test.target' ) ) assert isinstance(__A , __A ) _snake_case = calculate_rouge_path( data_dir.joinpath('test.source' ) , data_dir.joinpath('test.target' ) , bootstrap_aggregation=__A ) assert isinstance(__A , __A )
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging lowercase : Optional[Any] = logging.get_logger(__name__) lowercase : List[str] = { "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json", # See all Marian models at https://huggingface.co/models?filter=marian } class __UpperCAmelCase ( _lowerCamelCase ): __lowercase = """marian""" __lowercase = ["""past_key_values"""] __lowercase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self , lowerCAmelCase_=5_81_01 , lowerCAmelCase_=None , lowerCAmelCase_=10_24 , lowerCAmelCase_=12 , lowerCAmelCase_=40_96 , lowerCAmelCase_=16 , lowerCAmelCase_=12 , lowerCAmelCase_=40_96 , lowerCAmelCase_=16 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_="gelu" , lowerCAmelCase_=10_24 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=5_81_00 , lowerCAmelCase_=False , lowerCAmelCase_=5_81_00 , lowerCAmelCase_=0 , lowerCAmelCase_=0 , lowerCAmelCase_=True , **lowerCAmelCase_ , ): """simple docstring""" _snake_case = vocab_size _snake_case = decoder_vocab_size or vocab_size _snake_case = max_position_embeddings _snake_case = d_model _snake_case = encoder_ffn_dim _snake_case = encoder_layers _snake_case = encoder_attention_heads _snake_case = decoder_ffn_dim _snake_case = decoder_layers _snake_case = decoder_attention_heads _snake_case = dropout _snake_case = attention_dropout _snake_case = activation_dropout _snake_case = activation_function _snake_case = init_std _snake_case = encoder_layerdrop _snake_case = decoder_layerdrop _snake_case = use_cache _snake_case = encoder_layers _snake_case = scale_embedding # scale factor will be sqrt(d_model) if True _snake_case = share_encoder_decoder_embeddings super().__init__( pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , forced_eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , ) class __UpperCAmelCase ( _lowerCamelCase ): @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def lowerCamelCase ( self ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: _snake_case = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: _snake_case = {0: 'batch'} _snake_case = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: _snake_case = {0: 'batch', 1: 'decoder_sequence'} _snake_case = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase_ , direction='inputs' ) elif self.task == "causal-lm": # TODO: figure this case out. 
_snake_case = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: _snake_case , _snake_case = self.num_layers for i in range(lowerCAmelCase_ ): _snake_case = {0: 'batch', 2: 'past_sequence + sequence'} _snake_case = {0: 'batch', 2: 'past_sequence + sequence'} else: _snake_case = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def lowerCamelCase ( self ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: _snake_case = super().outputs else: _snake_case = super(lowerCAmelCase_ , self ).outputs if self.use_past: _snake_case , _snake_case = self.num_layers for i in range(lowerCAmelCase_ ): _snake_case = {0: 'batch', 2: 'past_sequence + sequence'} _snake_case = {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ): """simple docstring""" _snake_case = self._generate_dummy_inputs_for_encoder_and_decoder( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Generate decoder inputs _snake_case = seq_length if not self.use_past else 1 _snake_case = self._generate_dummy_inputs_for_encoder_and_decoder( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} _snake_case = dict(**lowerCAmelCase_ , **lowerCAmelCase_ ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch _snake_case , _snake_case = common_inputs['input_ids'].shape _snake_case = common_inputs['decoder_input_ids'].shape[1] _snake_case , _snake_case = self.num_attention_heads _snake_case = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) _snake_case = decoder_seq_length + 3 _snake_case = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) _snake_case = torch.cat( [common_inputs['decoder_attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ )] , dim=1 ) _snake_case = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered _snake_case , _snake_case = self.num_layers _snake_case = min(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = max(lowerCAmelCase_ , lowerCAmelCase_ ) - min_num_layers _snake_case = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(lowerCAmelCase_ ): common_inputs["past_key_values"].append( ( torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ ), ) ) # TODO: test this. 
_snake_case = encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(lowerCAmelCase_ , lowerCAmelCase_ ): common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) ) return common_inputs def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ): """simple docstring""" _snake_case = self._generate_dummy_inputs_for_encoder_and_decoder( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch _snake_case , _snake_case = common_inputs['input_ids'].shape # Not using the same length for past_key_values _snake_case = seqlen + 2 _snake_case , _snake_case = self.num_layers _snake_case , _snake_case = self.num_attention_heads _snake_case = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) _snake_case = common_inputs['attention_mask'].dtype _snake_case = torch.cat( [common_inputs['attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ , dtype=lowerCAmelCase_ )] , dim=1 ) _snake_case = [ (torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) for _ in range(lowerCAmelCase_ ) ] return common_inputs def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ): """simple docstring""" _snake_case = compute_effective_axis_dimension( lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _snake_case = tokenizer.num_special_tokens_to_add(lowerCAmelCase_ ) _snake_case = compute_effective_axis_dimension( lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase_ ) # Generate dummy inputs according to compute batch and sequence _snake_case = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size _snake_case = dict(tokenizer(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ ) ) return common_inputs def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = -1 , lowerCAmelCase_ = -1 , lowerCAmelCase_ = False , lowerCAmelCase_ = None , ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: _snake_case = self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ ) else: _snake_case = self._generate_dummy_inputs_for_causal_lm( lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ ) return common_inputs def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: _snake_case = super()._flatten_past_key_values_(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) else: _snake_case = super(lowerCAmelCase_ , self )._flatten_past_key_values_( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) @property def lowerCamelCase ( self ): """simple docstring""" return 1E-4
160
1
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _lowerCAmelCase ( lowercase ,lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : List[str] = StableDiffusionSAGPipeline __UpperCAmelCase : Dict = TEXT_TO_IMAGE_PARAMS __UpperCAmelCase : int = TEXT_TO_IMAGE_BATCH_PARAMS __UpperCAmelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS __UpperCAmelCase : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS __UpperCAmelCase : Optional[int] = False def _lowercase ( self : Any ): torch.manual_seed(0 ) __lowercase = UNetaDConditionModel( block_out_channels=(3_2, 6_4), layers_per_block=2, sample_size=3_2, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=3_2, ) __lowercase = DDIMScheduler( beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=UpperCAmelCase__, set_alpha_to_one=UpperCAmelCase__, ) torch.manual_seed(0 ) __lowercase = AutoencoderKL( block_out_channels=[3_2, 6_4], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0 ) __lowercase = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=3_2, intermediate_size=3_7, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_0_0_0, ) __lowercase = CLIPTextModel(UpperCAmelCase__ ) __lowercase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) __lowercase = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def _lowercase ( self : Optional[int], UpperCAmelCase__ : str, UpperCAmelCase__ : Optional[int]=0 ): if str(UpperCAmelCase__ ).startswith("mps" ): __lowercase = torch.manual_seed(UpperCAmelCase__ ) else: __lowercase = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ ) __lowercase = { "prompt": ".", "generator": generator, "num_inference_steps": 2, "guidance_scale": 1.0, "sag_scale": 1.0, "output_type": "numpy", } return inputs def _lowercase ( self : Tuple ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : List[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self : int ): __lowercase = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" ) __lowercase = sag_pipe.to(UpperCAmelCase__ ) sag_pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = "." 
__lowercase = torch.manual_seed(0 ) __lowercase = sag_pipe( [prompt], generator=UpperCAmelCase__, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=2_0, output_type="np" ) __lowercase = output.images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array([0.1_568, 0.1_738, 0.1_695, 0.1_693, 0.1_507, 0.1_705, 0.1_547, 0.1_751, 0.1_949] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def _lowercase ( self : Union[str, Any] ): __lowercase = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" ) __lowercase = sag_pipe.to(UpperCAmelCase__ ) sag_pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = "." __lowercase = torch.manual_seed(0 ) __lowercase = sag_pipe( [prompt], generator=UpperCAmelCase__, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=2_0, output_type="np" ) __lowercase = output.images __lowercase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase = np.array([0.3_459, 0.2_876, 0.2_537, 0.3_002, 0.2_671, 0.2_160, 0.3_026, 0.2_262, 0.2_371] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def _lowercase ( self : int ): __lowercase = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" ) __lowercase = sag_pipe.to(UpperCAmelCase__ ) sag_pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = "." __lowercase = torch.manual_seed(0 ) __lowercase = sag_pipe( [prompt], width=7_6_8, height=5_1_2, generator=UpperCAmelCase__, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=2_0, output_type="np", ) __lowercase = output.images assert image.shape == (1, 5_1_2, 7_6_8, 3)
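# Hedged usage sketch of the pipeline exercised by the tests above (prompt, dtype, and
# file name are illustrative; assumes a CUDA device is available):
import torch
from diffusers import StableDiffusionSAGPipeline

pipe = StableDiffusionSAGPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")
image = pipe("a photo of an astronaut riding a horse", sag_scale=0.75).images[0]
image.save("sag_sample.png")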
17
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class _lowerCAmelCase ( unittest.TestCase ,lowercase ): """simple docstring""" def _lowercase ( self : List[Any] ): __lowercase = load_tool("text-classification" ) self.tool.setup() __lowercase = load_tool("text-classification", remote=UpperCAmelCase__ ) def _lowercase ( self : str ): __lowercase = self.tool("That's quite cool", ["positive", "negative"] ) self.assertEqual(UpperCAmelCase__, "positive" ) def _lowercase ( self : str ): __lowercase = self.remote_tool("That's quite cool", ["positive", "negative"] ) self.assertEqual(UpperCAmelCase__, "positive" ) def _lowercase ( self : List[str] ): __lowercase = self.tool(text="That's quite cool", labels=["positive", "negative"] ) self.assertEqual(UpperCAmelCase__, "positive" ) def _lowercase ( self : Tuple ): __lowercase = self.remote_tool(text="That's quite cool", labels=["positive", "negative"] ) self.assertEqual(UpperCAmelCase__, "positive" )
17
1
from manim import * class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ): """simple docstring""" def _UpperCamelCase ( self ) -> str: SCREAMING_SNAKE_CASE_ = Rectangle(height=0.5 , width=0.5 ) SCREAMING_SNAKE_CASE_ = Rectangle(height=0.25 , width=0.25 ) SCREAMING_SNAKE_CASE_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) SCREAMING_SNAKE_CASE_ = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE_ = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE_ = VGroup(*_A ).arrange(_A , buff=0 ) SCREAMING_SNAKE_CASE_ = VGroup(*_A ).arrange(_A , buff=0 ) SCREAMING_SNAKE_CASE_ = VGroup(_A , _A ).arrange(_A , buff=0 ) SCREAMING_SNAKE_CASE_ = Text('''CPU''' , font_size=24 ) SCREAMING_SNAKE_CASE_ = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_A ) SCREAMING_SNAKE_CASE_ = [mem.copy() for i in range(4 )] SCREAMING_SNAKE_CASE_ = VGroup(*_A ).arrange(_A , buff=0 ) SCREAMING_SNAKE_CASE_ = Text('''GPU''' , font_size=24 ) SCREAMING_SNAKE_CASE_ = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A ) gpu.move_to([-1, -1, 0] ) self.add(_A ) SCREAMING_SNAKE_CASE_ = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE_ = VGroup(*_A ).arrange(_A , buff=0 ) SCREAMING_SNAKE_CASE_ = Text('''Model''' , font_size=24 ) SCREAMING_SNAKE_CASE_ = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A ) model.move_to([3, -1.0, 0] ) self.add(_A ) SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] for i, rect in enumerate(_A ): rect.set_stroke(_A ) SCREAMING_SNAKE_CASE_ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_A , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_A ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=_A , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=_A , buff=0.0 ) self.add(_A ) model_cpu_arr.append(_A ) self.add(*_A , *_A , *_A ) SCREAMING_SNAKE_CASE_ = [mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE_ = VGroup(*_A ).arrange(_A , buff=0 ) SCREAMING_SNAKE_CASE_ = Text('''Loaded Checkpoint''' , font_size=24 ) SCREAMING_SNAKE_CASE_ = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A ) checkpoint.move_to([3, 0.5, 0] ) self.add(_A ) SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] for i, rect in enumerate(_A ): SCREAMING_SNAKE_CASE_ = fill.copy().set_fill(_A , opacity=0.7 ) target.move_to(_A ) ckpt_arr.append(_A ) SCREAMING_SNAKE_CASE_ = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(_A ) self.add(*_A , *_A ) SCREAMING_SNAKE_CASE_ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) SCREAMING_SNAKE_CASE_ = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_A , _A ) SCREAMING_SNAKE_CASE_ = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(_A , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(_A ) SCREAMING_SNAKE_CASE_ = MarkupText( F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) SCREAMING_SNAKE_CASE_ = [meta_mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE_ = [meta_mem.copy() for i in range(6 )] SCREAMING_SNAKE_CASE_ = VGroup(*_A ).arrange(_A , buff=0 ) 
SCREAMING_SNAKE_CASE_ = VGroup(*_A ).arrange(_A , buff=0 ) SCREAMING_SNAKE_CASE_ = VGroup(_A , _A ).arrange(_A , buff=0 ) SCREAMING_SNAKE_CASE_ = Text('''Disk''' , font_size=24 ) SCREAMING_SNAKE_CASE_ = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(_A , run_time=3 ) , Write(_A , run_time=1 ) , Create(_A , run_time=1 ) ) SCREAMING_SNAKE_CASE_ = [] for i, rect in enumerate(_A ): SCREAMING_SNAKE_CASE_ = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(_A , run_time=1.5 ) ) self.play(*_A ) self.play(FadeOut(_A ) ) SCREAMING_SNAKE_CASE_ = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(_A , run_time=3 ) ) self.play( FadeOut(_A , _A , *_A , *_A ) , ) self.wait()
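# Hedged rendering sketch for a Manim Community scene like the one above. The scene class
# name was lost to obfuscation, so `CheckpointScene` is a placeholder; `tempconfig` and
# `Scene.render()` are the standard programmatic entry points in Manim CE.
from manim import tempconfig

with tempconfig({"quality": "low_quality", "preview": False}):
    scene = CheckpointScene()  # placeholder name for the Scene subclass defined above
    scene.render()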
257
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
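# Worked round-trip check for the converters above (verifiable by hand:
# MMXXIV = 1000 + 1000 + 10 + 10 + (5 - 1) = 2024):
assert roman_to_int("MMXXIV") == 2024
assert int_to_roman(2024) == "MMXXIV"
assert roman_to_int(int_to_roman(3999)) == 3999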
257
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
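# Hedged usage sketch of the lazy module above: public names resolve on first attribute
# access. The checkpoint is the standard OpenAI CLIP ViT-B/32 card (an assumption here).
from transformers import CLIPModel, CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")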
85
from jiwer import compute_measures

import datasets


_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.

The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.

This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.

Word error rate can then be computed as:

WER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).

This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.

Args:
    references: List of references for each speech input.
    predictions: List of transcriptions to score.
    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.

Returns:
    (float): the word error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> wer = datasets.load_metric("wer")
    >>> wer_score = wer.compute(predictions=predictions, references=references)
    >>> print(wer_score)
    0.5
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
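# Worked check of the iterative branch above, reusing the docstring example: pair one
# contributes 1 substitution over 4 reference words, pair two contributes 2 substitutions
# and 1 insertion over 4, so WER = (1 + 3) / (4 + 4) = 0.5.
import datasets

wer = datasets.load_metric("wer")
predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]
print(wer.compute(predictions=predictions, references=references))  # 0.5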
253
0
"""simple docstring""" from __future__ import annotations class UpperCAmelCase : """simple docstring""" def __init__( self , _UpperCAmelCase ): lowercase__: List[str] = TypeError( '''Matrices must be formed from a list of zero or more lists containing at ''' '''least one and the same number of values, each of which must be of type ''' '''int or float.''' ) if len(_SCREAMING_SNAKE_CASE ) != 0: lowercase__: Union[str, Any] = len(rows[0] ) if cols == 0: raise error for row in rows: if len(_SCREAMING_SNAKE_CASE ) != cols: raise error for value in row: if not isinstance(_SCREAMING_SNAKE_CASE , (int, float) ): raise error lowercase__: int = rows else: lowercase__: Union[str, Any] = [] def _snake_case ( self ): return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )] @property def _snake_case ( self ): return len(self.rows ) @property def _snake_case ( self ): return len(self.rows[0] ) @property def _snake_case ( self ): return (self.num_rows, self.num_columns) @property def _snake_case ( self ): return self.order[0] == self.order[1] def _snake_case ( self ): lowercase__: Optional[Any] = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows )] for row_num in range(self.num_rows ) ] return Matrix(_SCREAMING_SNAKE_CASE ) def _snake_case ( self ): if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0] ) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0]) ) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns ) ) def _snake_case ( self ): return bool(self.determinant() ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Tuple = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns ) if other_column != column ] for other_row in range(self.num_rows ) if other_row != row ] return Matrix(_SCREAMING_SNAKE_CASE ).determinant() def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ): if (row + column) % 2 == 0: return self.get_minor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return -1 * self.get_minor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _snake_case ( self ): return Matrix( [ [self.get_minor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for column in range(self.num_columns )] for row in range(self.num_rows ) ] ) def _snake_case ( self ): return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns ) ] for row in range(self.minors().num_rows ) ] ) def _snake_case ( self ): lowercase__: Optional[int] = [ [self.cofactors().rows[column][row] for column in range(self.num_columns )] for row in range(self.num_rows ) ] return Matrix(_SCREAMING_SNAKE_CASE ) def _snake_case ( self ): lowercase__: Dict = self.determinant() if not determinant: raise TypeError('''Only matrices with a non-zero determinant have an inverse''' ) return self.adjugate() * (1 / determinant) def __repr__( self ): return str(self.rows ) def __str__( self ): if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0] ) ) + "]]" return ( "[" + "\n ".join( [ '''[''' + '''. 
'''.join([str(_SCREAMING_SNAKE_CASE ) for value in row] ) + '''.]''' for row in self.rows ] ) + "]" ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): lowercase__: Optional[int] = TypeError('''Row must be a list containing all ints and/or floats''' ) if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise type_error for value in row: if not isinstance(_SCREAMING_SNAKE_CASE , (int, float) ): raise type_error if len(_SCREAMING_SNAKE_CASE ) != self.num_columns: raise ValueError( '''Row must be equal in length to the other rows in the matrix''' ) if position is None: self.rows.append(_SCREAMING_SNAKE_CASE ) else: lowercase__: List[str] = self.rows[0:position] + [row] + self.rows[position:] def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): lowercase__: Any = TypeError( '''Column must be a list containing all ints and/or floats''' ) if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise type_error for value in column: if not isinstance(_SCREAMING_SNAKE_CASE , (int, float) ): raise type_error if len(_SCREAMING_SNAKE_CASE ) != self.num_rows: raise ValueError( '''Column must be equal in length to the other columns in the matrix''' ) if position is None: lowercase__: Optional[Any] = [self.rows[i] + [column[i]] for i in range(self.num_rows )] else: lowercase__: Optional[Any] = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows ) ] def __eq__( self , _UpperCAmelCase ): if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return NotImplemented return self.rows == other.rows def __ne__( self , _UpperCAmelCase ): return not self == other def __neg__( self ): return self * -1 def __add__( self , _UpperCAmelCase ): if self.order != other.order: raise ValueError('''Addition requires matrices of the same order''' ) return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __sub__( self , _UpperCAmelCase ): if self.order != other.order: raise ValueError('''Subtraction requires matrices of the same order''' ) return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __mul__( self , _UpperCAmelCase ): if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ): return Matrix( [[int(element * other ) for element in row] for row in self.rows] ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if self.num_columns != other.num_rows: raise ValueError( '''The number of columns in the first matrix must ''' '''be equal to the number of rows in the second''' ) return Matrix( [ [Matrix.dot_product(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for column in other.columns()] for row in self.rows ] ) else: raise TypeError( '''A Matrix can only be multiplied by an int, float, or another matrix''' ) def __pow__( self , _UpperCAmelCase ): if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError('''A Matrix can only be raised to the power of an int''' ) if not self.is_square: raise ValueError('''Only square matrices can be raised to a power''' ) if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( '''Only invertable matrices can be raised to a negative power''' ) lowercase__: str = self for _ in range(other - 1 ): result *= self return result @classmethod def _snake_case ( cls , _UpperCAmelCase , _UpperCAmelCase ): return sum(row[i] * column[i] for i 
in range(len(_SCREAMING_SNAKE_CASE ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
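# Hedged usage sketch for the Matrix class above (assumes the obfuscated assignment
# targets are restored, e.g. that the constructor stores `self.rows`):
m = Matrix([[1, 2], [3, 4]])
print(m.determinant())                    # 1*4 - 2*3 = -2
print((m + m).rows)                       # [[2, 4], [6, 8]]
print(m * Matrix([[1, 0], [0, 1]]) == m)  # True: multiplying by the identity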
370
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "microsoft/unispeech-sat-base-100h-libri-ft": ( "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json" ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Any = "unispeech-sat" def __init__( self , _UpperCAmelCase=32 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-5 , _UpperCAmelCase="group" , _UpperCAmelCase="gelu" , _UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase=False , _UpperCAmelCase=128 , _UpperCAmelCase=16 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0.05 , _UpperCAmelCase=10 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=10 , _UpperCAmelCase=0 , _UpperCAmelCase=320 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=100 , _UpperCAmelCase=256 , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase="mean" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=256 , _UpperCAmelCase=(512, 512, 512, 512, 1500) , _UpperCAmelCase=(5, 3, 3, 1, 1) , _UpperCAmelCase=(1, 2, 3, 1, 1) , _UpperCAmelCase=512 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=504 , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase ) lowercase__: Union[str, Any] = hidden_size lowercase__: Union[str, Any] = feat_extract_norm lowercase__: Any = feat_extract_activation lowercase__: List[Any] = list(_UpperCAmelCase ) lowercase__: Optional[int] = list(_UpperCAmelCase ) lowercase__: int = list(_UpperCAmelCase ) lowercase__: Any = conv_bias lowercase__: List[str] = num_conv_pos_embeddings lowercase__: List[str] = num_conv_pos_embedding_groups lowercase__: int = len(self.conv_dim ) lowercase__: Dict = num_hidden_layers lowercase__: List[Any] = intermediate_size lowercase__: Dict = hidden_act lowercase__: Optional[Any] = num_attention_heads lowercase__: Union[str, Any] = hidden_dropout lowercase__: List[Any] = attention_dropout lowercase__: str = activation_dropout lowercase__: Optional[Any] = feat_proj_dropout lowercase__: Optional[int] = final_dropout lowercase__: Any = layerdrop lowercase__: int = layer_norm_eps lowercase__: Any = initializer_range lowercase__: Union[str, Any] = vocab_size lowercase__: Optional[Any] = num_clusters lowercase__: Dict = do_stable_layer_norm lowercase__: List[str] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase__: Dict = apply_spec_augment lowercase__: Union[str, Any] = mask_time_prob lowercase__: List[str] = mask_time_length lowercase__: Union[str, Any] = mask_time_min_masks lowercase__: str = mask_feature_prob lowercase__: Dict = mask_feature_length lowercase__: List[Any] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowercase__: Tuple = num_codevectors_per_group lowercase__: Optional[Any] = num_codevector_groups lowercase__: int = contrastive_logits_temperature lowercase__: Any = feat_quantizer_dropout lowercase__: int = num_negatives lowercase__: Optional[Any] = codevector_dim lowercase__: int = proj_codevector_dim lowercase__: str = diversity_loss_weight # ctc loss lowercase__: int = ctc_loss_reduction lowercase__: Union[str, Any] = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowercase__: Optional[Any] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowercase__: Union[str, Any] = list(_UpperCAmelCase ) lowercase__: Tuple = list(_UpperCAmelCase ) lowercase__: Union[str, Any] = list(_UpperCAmelCase ) lowercase__: Tuple = xvector_output_dim @property def _snake_case ( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
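# Hedged sketch instantiating the config above; `inputs_to_logits_ratio` is assumed to be
# the un-obfuscated name of the final property, i.e. the product of the conv strides
# (5 * 2 * 2 * 2 * 2 * 2 * 2 = 320 with the defaults).
from transformers import UniSpeechSatConfig

config = UniSpeechSatConfig()
print(config.inputs_to_logits_ratio)  # 320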
2
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
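# Hedged usage sketch of the lazy Nezha module above; a randomly initialized model avoids
# depending on any particular checkpoint name:
from transformers import NezhaConfig, NezhaModel

model = NezhaModel(NezhaConfig())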
50
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x=-0.6, figure_center_y=-0.4, figure_width=0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding=False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
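# Worked checks for get_distance above: the origin never diverges, so the loop runs to
# completion and returns (max_step - 1) / (max_step - 1) = 1.0, which both coloring
# helpers map to black; (2, 2) escapes on the first step and yields 0.0.
assert get_distance(0, 0, 50) == 1.0
assert get_black_and_white_rgb(get_distance(0, 0, 50)) == (0, 0, 0)
assert get_distance(2, 2, 50) == 0.0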
122
0
"""simple docstring""" from manim import * class lowerCAmelCase__ ( __snake_case ): def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : int = Rectangle(height=0.5 , width=0.5 ) UpperCAmelCase__ : Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) UpperCAmelCase__ : List[str] = [mem.copy() for i in range(6 )] UpperCAmelCase__ : Tuple = [mem.copy() for i in range(6 )] UpperCAmelCase__ : int = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) UpperCAmelCase__ : str = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) UpperCAmelCase__ : int = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) UpperCAmelCase__ : str = Text("CPU" , font_size=2_4 ) UpperCAmelCase__ : str = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowerCamelCase_ ) UpperCAmelCase__ : Any = [mem.copy() for i in range(1 )] UpperCAmelCase__ : List[Any] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) UpperCAmelCase__ : Optional[Any] = Text("GPU" , font_size=2_4 ) UpperCAmelCase__ : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) gpu.align_to(lowerCamelCase_ , lowerCamelCase_ ) gpu.set_x(gpu.get_x() - 1 ) self.add(lowerCamelCase_ ) UpperCAmelCase__ : Optional[Any] = [mem.copy() for i in range(6 )] UpperCAmelCase__ : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) UpperCAmelCase__ : List[Any] = Text("Model" , font_size=2_4 ) UpperCAmelCase__ : Optional[int] = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) model.move_to([3, -1.0, 0] ) self.play( Create(lowerCamelCase_ , run_time=1 ) , Create(lowerCamelCase_ , run_time=1 ) , Create(lowerCamelCase_ , run_time=1 ) , ) UpperCAmelCase__ : Optional[int] = MarkupText( f'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=2_4 , ) UpperCAmelCase__ : Tuple = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) UpperCAmelCase__ : Union[str, Any] = MarkupText( f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=1_8 , ) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase_ , run_time=2.5 ) , Write(lowerCamelCase_ ) , Write(lowerCamelCase_ ) ) self.add(lowerCamelCase_ ) UpperCAmelCase__ : List[Any] = [] UpperCAmelCase__ : Any = [] UpperCAmelCase__ : Optional[Any] = [] for i, rect in enumerate(lowerCamelCase_ ): UpperCAmelCase__ : Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.7 ) cpu_target.move_to(lowerCamelCase_ ) cpu_target.generate_target() UpperCAmelCase__ : Any = 0.46 / 4 UpperCAmelCase__ : str = 0.46 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCamelCase_ ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target , direction=lowerCamelCase_ , buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowerCamelCase_ , buff=0.0 ) cpu_targs.append(lowerCamelCase_ ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase_ ) ) second_animations.append(MoveToTarget(lowerCamelCase_ , run_time=1.5 ) ) self.play(*lowerCamelCase_ ) self.play(*lowerCamelCase_ ) self.wait()
365
"""simple docstring""" import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> Any: '''simple docstring''' assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE__ ( )-> Optional[int]: '''simple docstring''' UpperCAmelCase__ : int = "mock-s3-bucket" UpperCAmelCase__ : Any = f's3://{mock_bucket}' UpperCAmelCase__ : Tuple = extract_path_from_uri(snake_case ) assert dataset_path.startswith("s3://" ) is False UpperCAmelCase__ : str = "./local/path" UpperCAmelCase__ : Union[str, Any] = extract_path_from_uri(snake_case ) assert dataset_path == new_dataset_path def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case ) assert is_remote is True UpperCAmelCase__ : str = fsspec.filesystem("file" ) UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case ) assert is_remote is False @pytest.mark.parametrize("compression_fs_class" , snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Any , snake_case : List[str] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : int )-> int: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file} UpperCAmelCase__ : Dict = input_paths[compression_fs_class.protocol] if input_path is None: UpperCAmelCase__ : Optional[Any] = f'for \'{compression_fs_class.protocol}\' compression protocol, ' if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case ) UpperCAmelCase__ : Optional[Any] = fsspec.filesystem(compression_fs_class.protocol , fo=snake_case ) assert isinstance(snake_case , snake_case ) UpperCAmelCase__ : Union[str, Any] = os.path.basename(snake_case ) UpperCAmelCase__ : Optional[int] = expected_filename[: expected_filename.rindex("." 
)] assert fs.glob("*" ) == [expected_filename] with fs.open(snake_case , "r" , encoding="utf-8" ) as f, open(snake_case , encoding="utf-8" ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("protocol" , ["zip", "gzip"] ) def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Dict , snake_case : Tuple )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[str] = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path} UpperCAmelCase__ : int = compressed_file_paths[protocol] UpperCAmelCase__ : Any = "dataset.jsonl" UpperCAmelCase__ : Any = f'{protocol}://{member_file_path}::{compressed_file_path}' UpperCAmelCase__ , *UpperCAmelCase__ : Optional[int] = fsspec.get_fs_token_paths(snake_case ) assert fs.isfile(snake_case ) assert not fs.isfile("non_existing_" + member_file_path ) @pytest.mark.integration def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Dict , snake_case : Dict , snake_case : Dict )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = hf_api.dataset_info(snake_case , token=snake_case ) UpperCAmelCase__ : str = HfFileSystem(repo_info=snake_case , token=snake_case ) assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"] assert hffs.isdir("data" ) assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" ) with open(snake_case ) as f: assert hffs.open("data/text_data.txt" , "r" ).read() == f.read() def SCREAMING_SNAKE_CASE__ ( )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ : Tuple = "bz2" # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(snake_case , snake_case , clobber=snake_case ) with pytest.warns(snake_case ) as warning_info: importlib.reload(datasets.filesystems ) assert len(snake_case ) == 1 assert ( str(warning_info[0].message ) == f'A filesystem protocol was already set for {protocol} and will be overwritten.' )
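# Hedged sketch of the two helpers exercised above: stripping the protocol from a remote
# URI and opening a local gzip file ("archive.gz" is a placeholder) through fsspec:
import fsspec
from datasets.filesystems import extract_path_from_uri

assert extract_path_from_uri("s3://my-bucket/data") == "my-bucket/data"
fs = fsspec.filesystem("gzip", fo="archive.gz")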
298
0
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( A_ = 10_00 ): return sum(e for e in range(3 , A_ ) if e % 3 == 0 or e % 5 == 0 ) if __name__ == "__main__": print(F'''{solution() = }''')
106
from importlib import import_module from .logging import get_logger _snake_case : Optional[int] = get_logger(__name__) class a : """simple docstring""" def __init__( self : List[str] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[str]=None ) -> Any: __snake_case : Dict = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith("__" ): setattr(self , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase ) ) __snake_case : int = module._original_module if isinstance(lowerCamelCase , _PatchedModuleObj ) else module class a : """simple docstring""" __UpperCAmelCase : List[Any] = [] def __init__( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : str , lowerCamelCase : Dict , lowerCamelCase : Optional[Any]=None ) -> List[Any]: __snake_case : Union[str, Any] = obj __snake_case : Dict = target __snake_case : Any = new __snake_case : List[str] = target.split("." )[0] __snake_case : Union[str, Any] = {} __snake_case : int = attrs or [] def __enter__( self : List[Any] ) -> Tuple: *__snake_case , __snake_case : int = self.target.split("." ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(lowerCamelCase ) ): try: __snake_case : Any = import_module(".".join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): __snake_case : Union[str, Any] = getattr(self.obj , lowerCamelCase ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(lowerCamelCase , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): __snake_case : List[Any] = obj_attr # patch at top level setattr(self.obj , lowerCamelCase , _PatchedModuleObj(lowerCamelCase , attrs=self.attrs ) ) __snake_case : Optional[int] = getattr(self.obj , lowerCamelCase ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(lowerCamelCase , lowerCamelCase , _PatchedModuleObj(getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase ) , attrs=self.attrs ) ) __snake_case : List[Any] = getattr(lowerCamelCase , lowerCamelCase ) # finally set the target attribute setattr(lowerCamelCase , lowerCamelCase , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: __snake_case : Union[str, Any] = getattr(import_module(".".join(lowerCamelCase ) ) , lowerCamelCase ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". 
if getattr(self.obj , lowerCamelCase ) is attr_value: __snake_case : Tuple = getattr(self.obj , lowerCamelCase ) setattr(self.obj , lowerCamelCase , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" __snake_case : Dict = globals()["__builtins__"][target_attr] setattr(self.obj , lowerCamelCase , self.new ) else: raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' ) def __exit__( self : Any , *lowerCamelCase : Any ) -> Optional[int]: for attr in list(self.original ): setattr(self.obj , lowerCamelCase , self.original.pop(lowerCamelCase ) ) def __snake_case ( self : Optional[Any] ) -> Optional[int]: self.__enter__() self._active_patches.append(self ) def __snake_case ( self : Any ) -> List[str]: try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
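# Hedged usage sketch for the patcher above; `patch_submodule` is assumed to be its
# un-obfuscated name (as in the `datasets` patching utilities), and the namespace object
# stands in for a module that imported `os`:
import os
import types


def mock_join(*args):
    return "/".join(args)


mod = types.SimpleNamespace(os=os)
with patch_submodule(mod, "os.path.join", mock_join):
    assert mod.os.path.join("a", "b") == "a/b"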
123
0
'''simple docstring''' import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser lowerCamelCase__ = logging.getLogger(__name__) torch.set_grad_enabled(False) lowerCamelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu' def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=100 , __lowerCAmelCase=" " ): _UpperCAmelCase : Any = text.split(__lowerCAmelCase ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )] def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase , _UpperCAmelCase : Dict = [], [] for title, text in zip(documents["title"] , documents["text"] ): if text is not None: for passage in split_text(__lowerCAmelCase ): titles.append(title if title is not None else "" ) texts.append(__lowerCAmelCase ) return {"title": titles, "text": texts} def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase : str = ctx_tokenizer( documents["title"] , documents["text"] , truncation=__lowerCAmelCase , padding="longest" , return_tensors="pt" )["input_ids"] _UpperCAmelCase : str = ctx_encoder(input_ids.to(device=__lowerCAmelCase ) , return_dict=__lowerCAmelCase ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ): ###################################### logger.info("Step 1 - Create the dataset" ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way _UpperCAmelCase : Optional[int] = load_dataset( "csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words _UpperCAmelCase : Optional[int] = dataset.map(__lowerCAmelCase , batched=__lowerCAmelCase , num_proc=processing_args.num_proc ) # And compute the embeddings _UpperCAmelCase : Union[str, Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__lowerCAmelCase ) _UpperCAmelCase : Optional[int] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) _UpperCAmelCase : Dict = Features( {"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} ) # optional, save as float32 instead of float64 to save space _UpperCAmelCase : int = dataset.map( partial(__lowerCAmelCase , ctx_encoder=__lowerCAmelCase , ctx_tokenizer=__lowerCAmelCase ) , batched=__lowerCAmelCase , batch_size=processing_args.batch_size , features=__lowerCAmelCase , ) # And finally save your dataset _UpperCAmelCase : List[Any] = os.path.join(rag_example_args.output_dir , 
"my_knowledge_dataset" ) dataset.save_to_disk(__lowerCAmelCase ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info("Step 2 - Index the dataset" ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search _UpperCAmelCase : Any = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index("embeddings" , custom_index=__lowerCAmelCase ) # And save the index _UpperCAmelCase : List[str] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss" ) dataset.get_index("embeddings" ).save(__lowerCAmelCase ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class lowerCAmelCase__ : lowerCAmelCase : str = field( default=str(Path(UpperCAmelCase__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , ) lowerCAmelCase : Optional[str] = field( default=UpperCAmelCase__ , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , ) lowerCAmelCase : str = field( default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , ) lowerCAmelCase : str = field( default="facebook/dpr-ctx_encoder-multiset-base" , metadata={ "help": ( "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" " 'facebook/dpr-ctx_encoder-multiset-base'" ) } , ) lowerCAmelCase : Optional[str] = field( default=str(Path(UpperCAmelCase__ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , ) @dataclass class lowerCAmelCase__ : lowerCAmelCase : Optional[int] = field( default=UpperCAmelCase__ , metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." } , ) lowerCAmelCase : int = field( default=16 , metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." } , ) @dataclass class lowerCAmelCase__ : lowerCAmelCase : int = field( default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , ) lowerCAmelCase : int = field( default=128 , metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) lowerCamelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: lowerCamelCase__ = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
322
'''simple docstring''' from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig lowerCamelCase__ = logging.get_logger(__name__) # General docstring lowerCamelCase__ = 'RegNetConfig' # Base docstring lowerCamelCase__ = 'facebook/regnet-y-040' lowerCamelCase__ = [1, 1_088, 7, 7] # Image classification docstring lowerCamelCase__ = 'facebook/regnet-y-040' lowerCamelCase__ = 'tabby, tabby cat' lowerCamelCase__ = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowerCAmelCase__ ( tf.keras.layers.Layer ): def __init__( self : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 3 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : Optional[str] = "relu" , **lowerCamelCase__ : Tuple , ) ->Optional[Any]: '''simple docstring''' super().__init__(**lowerCamelCase__ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb _UpperCAmelCase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) _UpperCAmelCase : Dict = tf.keras.layers.ConvaD( filters=lowerCamelCase__ , kernel_size=lowerCamelCase__ , strides=lowerCamelCase__ , padding="VALID" , groups=lowerCamelCase__ , use_bias=lowerCamelCase__ , name="convolution" , ) _UpperCAmelCase : List[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) _UpperCAmelCase : int = ACTaFN[activation] if activation is not None else tf.identity def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Tuple ) ->Any: '''simple docstring''' _UpperCAmelCase : List[str] = self.convolution(self.padding(lowerCamelCase__ ) ) _UpperCAmelCase : Optional[Any] = self.normalization(lowerCamelCase__ ) _UpperCAmelCase : List[Any] = self.activation(lowerCamelCase__ ) return hidden_state class lowerCAmelCase__ ( tf.keras.layers.Layer ): def __init__( self : str , lowerCamelCase__ : RegNetConfig , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]: '''simple docstring''' super().__init__(**lowerCamelCase__ ) _UpperCAmelCase : List[str] = config.num_channels _UpperCAmelCase : Any = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , ) def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Optional[Any] ) ->Dict: '''simple docstring''' _UpperCAmelCase : List[str] = shape_list(lowerCamelCase__ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) _UpperCAmelCase : Optional[Any] = tf.transpose(lowerCamelCase__ , perm=(0, 2, 3, 1) ) _UpperCAmelCase : List[Any] = self.embedder(lowerCamelCase__ ) return hidden_state class lowerCAmelCase__ ( tf.keras.layers.Layer ): def __init__( self : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 2 , **lowerCamelCase__ : int ) ->Union[str, Any]: '''simple docstring''' super().__init__(**lowerCamelCase__ ) _UpperCAmelCase : int = tf.keras.layers.ConvaD( filters=lowerCamelCase__ , kernel_size=1 , strides=lowerCamelCase__ , use_bias=lowerCamelCase__ , name="convolution" ) _UpperCAmelCase : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : bool = False ) ->tf.Tensor: '''simple docstring''' return self.normalization(self.convolution(lowerCamelCase__ ) , training=lowerCamelCase__ ) class lowerCAmelCase__ ( tf.keras.layers.Layer ): def __init__( self : Any , lowerCamelCase__ : int , lowerCamelCase__ : int , **lowerCamelCase__ : Optional[int] ) ->Dict: '''simple docstring''' super().__init__(**lowerCamelCase__ ) _UpperCAmelCase : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase__ , name="pooler" ) _UpperCAmelCase : int = [ tf.keras.layers.ConvaD(filters=lowerCamelCase__ , kernel_size=1 , activation="relu" , name="attention.0" ), tf.keras.layers.ConvaD(filters=lowerCamelCase__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ), ] def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Optional[int] ) ->List[str]: '''simple docstring''' _UpperCAmelCase : Optional[Any] = self.pooler(lowerCamelCase__ ) for layer_module in self.attention: _UpperCAmelCase : str = layer_module(lowerCamelCase__ ) _UpperCAmelCase : Optional[Any] = hidden_state * pooled return hidden_state class lowerCAmelCase__ ( tf.keras.layers.Layer ): def __init__( self : Dict , lowerCamelCase__ : RegNetConfig , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 1 , **lowerCamelCase__ : Any ) ->List[str]: '''simple docstring''' super().__init__(**lowerCamelCase__ ) _UpperCAmelCase : List[str] = in_channels != out_channels or stride != 1 _UpperCAmelCase : List[str] = max(1 , out_channels // config.groups_width ) _UpperCAmelCase : List[str] = ( TFRegNetShortCut(lowerCamelCase__ , stride=lowerCamelCase__ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
class TFRegNetXLayer(tf.keras.layers.Layer):
    """
    RegNet's layer composed by three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    """
    RegNet's Y layer: an X layer with Squeeze and Excitation.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetStage(tf.keras.layers.Layer):
    """
    A RegNet stage composed of stacked layers.
    """

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
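
# How a stage is assembled (sketch; the channel/depth numbers are illustrative,
# not taken from any released RegNet variant): the first block changes channels
# and may downsample, the remaining `depth - 1` blocks keep the shape.
#
#     config = RegNetConfig(layer_type="y", groups_width=8)
#     stage = TFRegNetStage(config, in_channels=32, out_channels=64, stride=2, depth=3)
#     # layers.0: 32 -> 64, stride 2 (the shortcut projects the residual)
#     # layers.1, layers.2: 64 -> 64, stride 1 (identity shortcut)
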
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
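
# Layout note, as a sketch: the main layer accepts NCHW pixel values, computes
# in NHWC (the only layout Keras `Conv2D` supports on CPU), then transposes
# everything back to NCHW so outputs match the PyTorch implementation. With
# the facebook/regnet-y-040 shapes quoted in the docstring constants above:
#
#     outputs = main_layer(tf.random.uniform((1, 3, 224, 224)))
#     # outputs.last_hidden_state.shape == (1, 1088, 7, 7)   (NCHW)
#     # outputs.pooler_output.shape     == (1, 1088, 1, 1)
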
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = r"""
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
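
# Feature-extraction usage, a minimal sketch mirroring the generated
# code-sample docstring (downloads the checkpoint; `image` is a PIL image
# you supply):
#
#     from transformers import AutoImageProcessor, TFRegNetModel
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetModel.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="tf")
#     outputs = model(**inputs)
#     outputs.last_hidden_state  # (batch_size, 1088, 7, 7) for this checkpoint
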
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: Optional[tf.Tensor] = None,
        labels: Optional[tf.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
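
# Classification usage, a minimal sketch (the checkpoint name comes from the
# docstring constants above; `image` is a PIL image you supply, and eager
# execution is assumed for the `int(...)` conversion):
#
#     from transformers import AutoImageProcessor, TFRegNetForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="tf")
#     logits = model(**inputs).logits
#     predicted_label = int(tf.math.argmax(logits, axis=-1))
#     print(model.config.id2label[predicted_label])  # e.g. "tabby, tabby cat"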