code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = LEDTokenizer SCREAMING_SNAKE_CASE__ : Dict = LEDTokenizerFast SCREAMING_SNAKE_CASE__ : List[str] = True def UpperCamelCase__ ( self ): """simple docstring""" super().setUp() UpperCAmelCase_ : List[Any] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] UpperCAmelCase_ : Union[str, Any] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) ) UpperCAmelCase_ : Optional[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] UpperCAmelCase_ : Union[str, Any] = {"unk_token": "<unk>"} UpperCAmelCase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowercase_ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowercase_ ) ) def UpperCamelCase__ ( self , **lowercase_ ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ ) def UpperCamelCase__ ( self , **lowercase_ ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" return "lower newer", "lower 
newer" @cached_property def UpperCamelCase__ ( self ): """simple docstring""" return LEDTokenizer.from_pretrained("allenai/led-base-16384" ) @cached_property def UpperCamelCase__ ( self ): """simple docstring""" return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" ) @require_torch def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = ["A long paragraph for summarization.", "Another paragraph for summarization."] UpperCAmelCase_ : Dict = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ : int = tokenizer(lowercase_ , max_length=len(lowercase_ ) , padding=lowercase_ , return_tensors="pt" ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) UpperCAmelCase_ : str = batch.input_ids.tolist()[0] self.assertListEqual(lowercase_ , lowercase_ ) @require_torch def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = ["A long paragraph for summarization.", "Another paragraph for summarization."] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ : str = tokenizer(lowercase_ , padding=lowercase_ , return_tensors="pt" ) self.assertIn("input_ids" , lowercase_ ) self.assertIn("attention_mask" , lowercase_ ) self.assertNotIn("labels" , lowercase_ ) self.assertNotIn("decoder_attention_mask" , lowercase_ ) @require_torch def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = [ "Summary of the text.", "Another summary.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ : List[Any] = tokenizer(text_target=lowercase_ , max_length=32 , padding="max_length" , return_tensors="pt" ) self.assertEqual(32 , targets["input_ids"].shape[1] ) @require_torch def UpperCamelCase__ ( self ): """simple docstring""" for tokenizer in 
[self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ : Tuple = tokenizer( ["I am a small frog" * 1024, "I am a small frog"] , padding=lowercase_ , truncation=lowercase_ , return_tensors="pt" ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertEqual(batch.input_ids.shape , (2, 5122) ) @require_torch def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = ["A long paragraph for summarization."] UpperCAmelCase_ : Optional[int] = [ "Summary of the text.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ : Optional[Any] = tokenizer(lowercase_ , return_tensors="pt" ) UpperCAmelCase_ : List[str] = tokenizer(text_target=lowercase_ , return_tensors="pt" ) UpperCAmelCase_ : Optional[Any] = inputs["input_ids"] UpperCAmelCase_ : int = targets["input_ids"] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def UpperCamelCase__ ( self ): """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ : Dict = ["Summary of the text.", "Another summary."] UpperCAmelCase_ : Optional[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] UpperCAmelCase_ : Optional[Any] = tokenizer(lowercase_ , padding=lowercase_ ) UpperCAmelCase_ : Tuple = [[0] * len(lowercase_ ) for x in encoded_output["input_ids"]] UpperCAmelCase_ : Optional[Any] = tokenizer.pad(lowercase_ ) self.assertSequenceEqual(outputs["global_attention_mask"] , lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): 
UpperCAmelCase_ : Any = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) UpperCAmelCase_ : str = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) UpperCAmelCase_ : Union[str, Any] = "A, <mask> AllenNLP sentence." UpperCAmelCase_ : List[Any] = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ ) UpperCAmelCase_ : Dict = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ ) self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) UpperCAmelCase_ : Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) UpperCAmelCase_ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
61
import argparse from argparse import Namespace import torch from torch import nn from transformers import XGLMConfig, XGLMForCausalLM def __UpperCamelCase ( _A ): lowerCAmelCase_ = [ '''decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(_A , _A ) def __UpperCamelCase ( _A ): lowerCAmelCase_ , lowerCAmelCase_ = emb.weight.shape lowerCAmelCase_ = nn.Linear(_A , _A , bias=_A ) lowerCAmelCase_ = emb.weight.data return lin_layer def __UpperCamelCase ( _A ): lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' ) lowerCAmelCase_ = Namespace(**checkpoint['''cfg''']['''model'''] ) lowerCAmelCase_ = checkpoint['''model'''] remove_ignore_keys_(_A ) lowerCAmelCase_ = state_dict['''decoder.embed_tokens.weight'''].shape[0] lowerCAmelCase_ = {key.replace('''decoder''' , '''model''' ): val for key, val in state_dict.items()} lowerCAmelCase_ = XGLMConfig( vocab_size=_A , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''gelu''' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , ) lowerCAmelCase_ = XGLMForCausalLM(_A ) lowerCAmelCase_ = model.load_state_dict(_A , strict=_A ) print(_A ) lowerCAmelCase_ = make_linear_from_emb(model.model.embed_tokens ) return model if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') _A = parser.parse_args() _A = 
convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
278
0
import math class UpperCAmelCase__ : """simple docstring""" def __init__( self , A_=0 ) -> int: # a graph with Node 0,1,...,N-1 __UpperCamelCase =n __UpperCamelCase =[ [math.inf for j in range(0 , A_ )] for i in range(0 , A_ ) ] # adjacency matrix for weight __UpperCamelCase =[ [math.inf for j in range(0 , A_ )] for i in range(0 , A_ ) ] # dp[i][j] stores minimum distance from i to j def _a ( self , A_ , A_ , A_ ) -> int: __UpperCamelCase =w def _a ( self ) -> List[str]: for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): __UpperCamelCase =min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def _a ( self , A_ , A_ ) -> List[Any]: return self.dp[u][v] if __name__ == "__main__": _A = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
62
# This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES _A = '''tiny-wmt19-en-ru''' # Build # borrowed from a test _A = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] _A = dict(zip(vocab, range(len(vocab)))) _A = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] with tempfile.TemporaryDirectory() as tmpdirname: _A = Path(tmpdirname) _A = build_dir / VOCAB_FILES_NAMES['''src_vocab_file'''] _A = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file'''] _A = build_dir / VOCAB_FILES_NAMES['''merges_file'''] with open(src_vocab_file, '''w''') as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, '''w''') as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, '''w''') as fp: fp.write('''\n'''.join(merges)) _A = FSMTTokenizer( langs=['''en''', '''ru'''], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) _A = FSMTConfig( langs=['''ru''', '''en'''], src_vocab_size=1_000, tgt_vocab_size=1_000, d_model=4, encoder_layers=1, 
decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) _A = FSMTForConditionalGeneration(config) print(f"num of params {tiny_model.num_parameters()}") # Test _A = tokenizer(['''Making tiny model'''], return_tensors='''pt''') _A = tiny_model(**batch) print('''test output:''', len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(f"Generated {mname_tiny}") # Upload # transformers-cli upload tiny-wmt19-en-ru
278
0
'''simple docstring''' def _lowerCamelCase ( lowercase : list , lowercase : list , lowercase : int ) -> list: _a = len(lowercase ) _a = [[0] * n for i in range(lowercase )] for i in range(lowercase ): _a = y_points[i] for i in range(2 , lowercase ): for j in range(lowercase , lowercase ): _a = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
63
import argparse from collections import defaultdict import yaml _A = '''docs/source/en/_toctree.yml''' def __UpperCamelCase ( _A ): lowerCAmelCase_ = defaultdict(_A ) for doc in model_doc: counts[doc["local"]] += 1 lowerCAmelCase_ = [key for key, value in counts.items() if value > 1] lowerCAmelCase_ = [] for duplicate_key in duplicates: lowerCAmelCase_ = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} ) if len(_A ) > 1: raise ValueError( f"{duplicate_key} is present several times in the documentation table of content at " '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] ) # Sort return sorted(_A , key=lambda _A : s["title"].lower() ) def __UpperCamelCase ( _A=False ): with open(_A , encoding='''utf-8''' ) as f: lowerCAmelCase_ = yaml.safe_load(f.read() ) # Get to the API doc lowerCAmelCase_ = 0 while content[api_idx]["title"] != "API": api_idx += 1 lowerCAmelCase_ = content[api_idx]['''sections'''] # Then to the model doc lowerCAmelCase_ = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 lowerCAmelCase_ = api_doc[model_idx]['''sections'''] lowerCAmelCase_ = [(idx, section) for idx, section in enumerate(_A ) if '''sections''' in section] lowerCAmelCase_ = False for idx, modality_doc in modalities_docs: lowerCAmelCase_ = modality_doc['''sections'''] lowerCAmelCase_ = clean_model_doc_toc(_A ) if old_modality_doc != new_modality_doc: lowerCAmelCase_ = True if overwrite: lowerCAmelCase_ = new_modality_doc if diff: if overwrite: lowerCAmelCase_ = model_doc lowerCAmelCase_ = api_doc with open(_A , '''w''' , encoding='''utf-8''' ) as f: f.write(yaml.dump(_A , allow_unicode=_A ) ) else: raise ValueError( '''The model doc part of the table of content is not properly 
sorted, run `make style` to fix this.''' ) if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') _A = parser.parse_args() check_model_doc(args.fix_and_overwrite)
278
0
"""simple docstring""" from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING A_ = logging.get_logger(__name__) @add_end_docstrings(__a ) class lowercase( __a ): '''simple docstring''' def __init__( self: Optional[Any], *a_: Dict, **a_: int ): '''simple docstring''' super().__init__(*a_, **a_ ) requires_backends(self, """decord""" ) self.check_model_type(a_ ) def UpperCamelCase_ ( self: Union[str, Any], a_: str=None, a_: Any=None, a_: Optional[int]=None ): '''simple docstring''' _snake_case : List[str] = {} if frame_sampling_rate is not None: _snake_case : str = frame_sampling_rate if num_frames is not None: _snake_case : List[str] = num_frames _snake_case : Optional[Any] = {} if top_k is not None: _snake_case : Tuple = top_k return preprocess_params, {}, postprocess_params def __call__( self: Union[str, Any], a_: Union[str, List[str]], **a_: Tuple ): '''simple docstring''' return super().__call__(a_, **a_ ) def UpperCamelCase_ ( self: Union[str, Any], a_: List[str], a_: Optional[int]=None, a_: Optional[int]=1 ): '''simple docstring''' if num_frames is None: _snake_case : Optional[Any] = self.model.config.num_frames if video.startswith("""http://""" ) or video.startswith("""https://""" ): _snake_case : Tuple = BytesIO(requests.get(a_ ).content ) _snake_case : Union[str, Any] = VideoReader(a_ ) videoreader.seek(0 ) _snake_case : Union[str, Any] = 0 _snake_case : List[str] = num_frames * frame_sampling_rate - 1 _snake_case : Dict = np.linspace(a_, a_, num=a_, dtype=np.intaa ) _snake_case : str = videoreader.get_batch(a_ ).asnumpy() _snake_case : Union[str, Any] = list(a_ ) _snake_case : Union[str, Any] = 
self.image_processor(a_, return_tensors=self.framework ) return model_inputs def UpperCamelCase_ ( self: str, a_: str ): '''simple docstring''' _snake_case : List[Any] = self.model(**a_ ) return model_outputs def UpperCamelCase_ ( self: Union[str, Any], a_: List[str], a_: str=5 ): '''simple docstring''' if top_k > self.model.config.num_labels: _snake_case : Optional[int] = self.model.config.num_labels if self.framework == "pt": _snake_case : Tuple = model_outputs.logits.softmax(-1 )[0] _snake_case , _snake_case : List[Any] = probs.topk(a_ ) else: raise ValueError(f"Unsupported framework: {self.framework}" ) _snake_case : str = scores.tolist() _snake_case : str = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(a_, a_ )]
64
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class A ( __UpperCAmelCase ): __snake_case = (UnCLIPScheduler,) def SCREAMING_SNAKE_CASE__ ( self, **UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = { '''num_train_timesteps''': 1000, '''variance_type''': '''fixed_small_log''', '''clip_sample''': True, '''clip_sample_range''': 1.0, '''prediction_type''': '''epsilon''', } config.update(**UpperCamelCase__ ) return config def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=UpperCamelCase__, prev_timestep=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.scheduler_classes[0] lowerCAmelCase_ = self.get_scheduler_config(variance_type='''fixed_small_log''' ) lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1E-5 assert 
torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1E-5 def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.scheduler_classes[0] lowerCAmelCase_ = self.get_scheduler_config(variance_type='''learned_range''' ) lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ ) lowerCAmelCase_ = 0.5 assert scheduler._get_variance(1, predicted_variance=UpperCamelCase__ ) - -10.1_712_790 < 1E-5 assert scheduler._get_variance(487, predicted_variance=UpperCamelCase__ ) - -5.7_998_052 < 1E-5 assert scheduler._get_variance(999, predicted_variance=UpperCamelCase__ ) - -0.0_010_011 < 1E-5 def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.scheduler_classes[0] lowerCAmelCase_ = self.get_scheduler_config() lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ ) lowerCAmelCase_ = scheduler.timesteps lowerCAmelCase_ = self.dummy_model() lowerCAmelCase_ = self.dummy_sample_deter lowerCAmelCase_ = torch.manual_seed(0 ) for i, t in enumerate(UpperCamelCase__ ): # 1. predict noise residual lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ ) # 2. predict previous mean of sample x_t-1 lowerCAmelCase_ = scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, generator=UpperCamelCase__ ).prev_sample lowerCAmelCase_ = pred_prev_sample lowerCAmelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) ) lowerCAmelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 252.2_682_495 ) < 1E-2 assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.scheduler_classes[0] lowerCAmelCase_ = self.get_scheduler_config() lowerCAmelCase_ = scheduler_class(**UpperCamelCase__ ) scheduler.set_timesteps(25 ) lowerCAmelCase_ = scheduler.timesteps lowerCAmelCase_ = self.dummy_model() lowerCAmelCase_ = self.dummy_sample_deter lowerCAmelCase_ = torch.manual_seed(0 ) for i, t in enumerate(UpperCamelCase__ ): # 1. 
predict noise residual lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ ) if i + 1 == timesteps.shape[0]: lowerCAmelCase_ = None else: lowerCAmelCase_ = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 lowerCAmelCase_ = scheduler.step( UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, prev_timestep=UpperCamelCase__, generator=UpperCamelCase__ ).prev_sample lowerCAmelCase_ = pred_prev_sample lowerCAmelCase_ = torch.sum(torch.abs(UpperCamelCase__ ) ) lowerCAmelCase_ = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 258.2_044_983 ) < 1E-2 assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" pass
278
0
def lowerCAmelCase_ ( __A, __A ) -> int: '''simple docstring''' if len(__A ) != len(__A ): raise ValueError("String lengths must match!" ) UpperCAmelCase__ = 0 for chara, chara in zip(__A, __A ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
65
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _A = logging.get_logger(__name__) _A = torch.device('''cpu''') def __UpperCamelCase ( ): lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw ) return im def __UpperCamelCase ( _A ): if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = dct.pop(_A ) lowerCAmelCase_ = val def __UpperCamelCase ( _A ): lowerCAmelCase_ = [] for k in state_dict.keys(): lowerCAmelCase_ = k if ".pwconv" in k: lowerCAmelCase_ = k_new.replace('''.pwconv''' , '''.point_wise_conv''' ) if ".dwconv" in k: lowerCAmelCase_ = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' ) if ".Proj." 
in k: lowerCAmelCase_ = k_new.replace('''.Proj.''' , '''.proj.''' ) if "patch_embed" in k_new: lowerCAmelCase_ = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' ) if "network" in k_new: lowerCAmelCase_ = k_new.split('''.''' ) if ls[2].isdigit(): lowerCAmelCase_ = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] ) else: lowerCAmelCase_ = k_new.replace('''network''' , '''swiftformer.encoder.network''' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size lowerCAmelCase_ = 1000 lowerCAmelCase_ = '''huggingface/label-files''' lowerCAmelCase_ = '''imagenet-1k-id2label.json''' lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()} lowerCAmelCase_ = idalabel lowerCAmelCase_ = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": lowerCAmelCase_ = [3, 3, 6, 4] lowerCAmelCase_ = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": lowerCAmelCase_ = [3, 3, 9, 6] lowerCAmelCase_ = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": lowerCAmelCase_ = [4, 3, 10, 5] lowerCAmelCase_ = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": lowerCAmelCase_ = [4, 4, 12, 6] lowerCAmelCase_ = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('''https''' ): lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' , check_hash=_A ) else: lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' ) lowerCAmelCase_ = checkpoint lowerCAmelCase_ = create_rename_keys(_A ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_A , _A , _A ) # 
load HuggingFace model lowerCAmelCase_ = SwiftFormerForImageClassification(_A ).eval() hf_model.load_state_dict(_A ) # prepare test inputs lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = ViTImageProcessor.from_pretrained('''preprocessor_config''' ) lowerCAmelCase_ = processor(images=_A , return_tensors='''pt''' ) # compare outputs from both models lowerCAmelCase_ = get_expected_output(_A ) lowerCAmelCase_ = hf_model(inputs['''pixel_values'''] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5] , _A , atol=1E-3 ) Path(_A ).mkdir(exist_ok=_A ) print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" ) hf_model.save_pretrained(_A ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swiftformer_name''', default='''swiftformer_xs''', choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''], type=str, help='''Name of the SwiftFormer model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''./converted_outputs/''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''') _A = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
278
0
"""simple docstring""" import math class lowerCamelCase : '''simple docstring''' def __init__( self: List[Any] , snake_case: int=0 ) -> int: # a graph with Node 0,1,...,N-1 snake_case_ :List[str] = n snake_case_ :int = [ [math.inf for j in range(0 , snake_case )] for i in range(0 , snake_case ) ] # adjacency matrix for weight snake_case_ :str = [ [math.inf for j in range(0 , snake_case )] for i in range(0 , snake_case ) ] # dp[i][j] stores minimum distance from i to j def lowerCAmelCase_ ( self: Optional[int] , snake_case: str , snake_case: Optional[Any] , snake_case: str ) -> Tuple: snake_case_ :List[Any] = w def lowerCAmelCase_ ( self: List[str] ) -> str: for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): snake_case_ :Any = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def lowerCAmelCase_ ( self: int , snake_case: List[Any] , snake_case: Optional[Any] ) -> Union[str, Any]: return self.dp[u][v] if __name__ == "__main__": __a = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
66
# NOTE(review): identifiers in this file appear machine-mangled — `_A` is
# assigned twice, both classes are named `A` (the second shadows the first),
# and `__init__` repeats the parameter name `UpperCamelCase__` (a SyntaxError
# as written).  Presumably this was `ViTConfig` / `ViTOnnxConfig` upstream;
# confirm against the original source before relying on any name here.
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

# Module-level logger (immediately clobbered by the dict assignment below,
# since both bindings use the name `_A`).
_A = logging.get_logger(__name__)

# Checkpoint name -> hosted config.json URL.
_A = {
    '''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class A(__UpperCAmelCase):
    """Configuration container for a ViT-style vision transformer."""

    # Model-type tag for this configuration family.
    __snake_case = 'vit'

    def __init__(
        self,
        UpperCamelCase__=768,      # presumably hidden_size — TODO confirm
        UpperCamelCase__=12,       # presumably num_hidden_layers
        UpperCamelCase__=12,       # presumably num_attention_heads
        UpperCamelCase__=3072,     # presumably intermediate_size
        UpperCamelCase__="gelu",   # presumably hidden_act
        UpperCamelCase__=0.0,      # presumably hidden_dropout_prob
        UpperCamelCase__=0.0,      # presumably attention_probs_dropout_prob
        UpperCamelCase__=0.02,     # presumably initializer_range
        UpperCamelCase__=1E-12,    # presumably layer_norm_eps
        UpperCamelCase__=224,      # presumably image_size
        UpperCamelCase__=16,       # presumably patch_size
        UpperCamelCase__=3,        # presumably num_channels
        UpperCamelCase__=True,     # presumably qkv_bias
        UpperCamelCase__=16,       # presumably encoder_stride
        **UpperCamelCase__,
    ):
        """Store the transformer hyper-parameters on the config instance."""
        super().__init__(**UpperCamelCase__ )
        # NOTE(review): the right-hand names below (`hidden_size`, ...) are not
        # bound anywhere visible; as written these lines would raise NameError.
        lowerCAmelCase_ = hidden_size
        lowerCAmelCase_ = num_hidden_layers
        lowerCAmelCase_ = num_attention_heads
        lowerCAmelCase_ = intermediate_size
        lowerCAmelCase_ = hidden_act
        lowerCAmelCase_ = hidden_dropout_prob
        lowerCAmelCase_ = attention_probs_dropout_prob
        lowerCAmelCase_ = initializer_range
        lowerCAmelCase_ = layer_norm_eps
        lowerCAmelCase_ = image_size
        lowerCAmelCase_ = patch_size
        lowerCAmelCase_ = num_channels
        lowerCAmelCase_ = qkv_bias
        lowerCAmelCase_ = encoder_stride


class A(__UpperCAmelCase):
    """ONNX-export description for the model above (shadows the config class)."""

    # Minimum exporter/opset version this ONNX config targets.
    __snake_case = version.parse('1.11' )

    @property
    def SCREAMING_SNAKE_CASE__(self):
        """Return the ONNX input spec: `pixel_values` with dynamic NCHW axes."""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ]
        )

    @property
    def SCREAMING_SNAKE_CASE__(self):
        """Absolute tolerance for validating exported model outputs."""
        return 1E-4
278
0
'''simple docstring''' import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer __lowerCamelCase = flax_key_tuple[:-1] + ('''weight''',) __lowerCamelCase = torch.permute(UpperCamelCase__ , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(UpperCamelCase__ ): # linear layer __lowerCamelCase = flax_key_tuple[:-1] + ('''weight''',) __lowerCamelCase = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: __lowerCamelCase = flax_key_tuple[:-1] + ('''weight''',) return flax_key_tuple, flax_tensor def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]: if "metadata" in layer: __lowerCamelCase = layer.split('''metadata''' ) __lowerCamelCase = ''''''.join(split_layer[0] )[:-1] __lowerCamelCase = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )] elif "kvstore" in layer: __lowerCamelCase = layer.split('''kvstore''' ) __lowerCamelCase = ''''''.join(split_layer[0] )[:-1] __lowerCamelCase = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )] else: __lowerCamelCase = layer.split('''/''' ) __lowerCamelCase = '''/'''.join(split_layer[:-1] ) __lowerCamelCase = (split_layer[-1],) if "kvstore/path" in layer: __lowerCamelCase = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}""" elif "kvstore/driver" in layer: __lowerCamelCase = '''file''' else: __lowerCamelCase = checkpoint_info[layer] return 
curr_real_layer_name, split_layer, content def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Dict: __lowerCamelCase = rename_keys(UpperCamelCase__ ) __lowerCamelCase = {} for k, v in current_block.items(): __lowerCamelCase = v __lowerCamelCase = new_current_block torch.save(UpperCamelCase__ , UpperCamelCase__ ) def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = WEIGHTS_NAME ) -> Tuple: __lowerCamelCase = convert_file_size_to_int(UpperCamelCase__ ) __lowerCamelCase = [] __lowerCamelCase = {} __lowerCamelCase = 0 __lowerCamelCase = 0 os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp: __lowerCamelCase = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target'''] __lowerCamelCase = flatten_dict(UpperCamelCase__ , sep='''/''' ) __lowerCamelCase = {} for layer in checkpoint_info.keys(): __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = get_key_and_tensorstore_dict( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if curr_real_layer_name in all_layers: __lowerCamelCase = content else: __lowerCamelCase = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file __lowerCamelCase = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() __lowerCamelCase = torch.tensor(UpperCamelCase__ ) __lowerCamelCase = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts __lowerCamelCase , __lowerCamelCase = rename_base_flax_keys(tuple(key.split('''/''' ) ) , UpperCamelCase__ ) __lowerCamelCase = '''/'''.join(UpperCamelCase__ ) # If this weight is going to tip up over the maximal size, we split. 
if current_block_size + weight_size > max_shard_size: __lowerCamelCase = os.path.join( UpperCamelCase__ , weights_name.replace('''.bin''' , f"""-{len(UpperCamelCase__ )+1:05d}-of-???.bin""" ) ) rename_and_save_block(UpperCamelCase__ , UpperCamelCase__ ) sharded_state_dicts.append(current_block.keys() ) del current_block __lowerCamelCase = {} __lowerCamelCase = 0 __lowerCamelCase = raw_weights.to(getattr(UpperCamelCase__ , UpperCamelCase__ ) ) current_block_size += weight_size total_size += weight_size # Add the last block __lowerCamelCase = os.path.join(UpperCamelCase__ , weights_name.replace('''.bin''' , f"""-{len(UpperCamelCase__ )+1:05d}-of-???.bin""" ) ) rename_and_save_block(UpperCamelCase__ , UpperCamelCase__ ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(UpperCamelCase__ ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index __lowerCamelCase = {} __lowerCamelCase = {} for idx, shard in enumerate(UpperCamelCase__ ): __lowerCamelCase = weights_name.replace( '''.bin''' , f"""-{idx+1:05d}-of-{len(UpperCamelCase__ ):05d}.bin""" ) # len(sharded_state_dicts):05d} __lowerCamelCase = os.path.join(UpperCamelCase__ , weights_name.replace('''.bin''' , f"""-{idx+1:05d}-of-???.bin""" ) ) os.rename(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) __lowerCamelCase = shard for key in shard: __lowerCamelCase = shard_file # Add the metadata __lowerCamelCase = {'''total_size''': total_size} __lowerCamelCase = {'''metadata''': metadata, '''weight_map''': weight_map} with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' , encoding='''utf-8''' ) as f: __lowerCamelCase = json.dumps(UpperCamelCase__ , indent=2 , sort_keys=UpperCamelCase__ ) + '''\n''' f.write(UpperCamelCase__ ) return metadata, index if __name__ == "__main__": __UpperCAmelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", 
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600", type=str, required=False, help="Path to a directory containing a folder per layer. Follows the original Google format.", ) parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size") parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model") parser.add_argument( "--pytorch_dump_folder_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted", type=str, required=False, help="Path to the output pytorch model.", ) __UpperCAmelCase =parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def __lowerCAmelCase ( ) -> List[Any]: from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer __lowerCamelCase = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' ) config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' ) __lowerCamelCase = SwitchTransformersForConditionalGeneration.from_pretrained( '''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' ) __lowerCamelCase = TaTokenizer.from_pretrained('''t5-small''' ) __lowerCamelCase = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''' __lowerCamelCase = tokenizer(UpperCamelCase__ , return_tensors='''pt''' ).input_ids __lowerCamelCase = model.generate(UpperCamelCase__ , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
67
import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __UpperCamelCase ( _A , _A ): assert isinstance(_A , _A ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A , keep_in_memory=_A ).read() _check_json_dataset(_A , _A ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase_ = features.copy() if features else default_expected_features lowerCAmelCase_ = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase_ = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() _check_json_dataset(_A , _A ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_3''': '''float64''', 
'''col_1''': '''string''', '''col_2''': '''int64'''}, ] , ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''} lowerCAmelCase_ = features.copy() if features else default_expected_features lowerCAmelCase_ = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase_ = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() assert isinstance(_A , _A ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def __UpperCamelCase ( _A , _A ): # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} lowerCAmelCase_ = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''} lowerCAmelCase_ = features.copy() lowerCAmelCase_ = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() assert isinstance(_A , _A ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A , split=_A ).read() _check_json_dataset(_A , _A ) assert dataset.split == split if split else "train" 
@pytest.mark.parametrize('''path_type''' , [str, list] ) def __UpperCamelCase ( _A , _A , _A ): if issubclass(_A , _A ): lowerCAmelCase_ = jsonl_path elif issubclass(_A , _A ): lowerCAmelCase_ = [jsonl_path] lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A ).read() _check_json_dataset(_A , _A ) def __UpperCamelCase ( _A , _A , _A=("train",) ): assert isinstance(_A , _A ) for split in splits: lowerCAmelCase_ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase_ = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=_A , keep_in_memory=_A ).read() _check_json_datasetdict(_A , _A ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase_ = features.copy() if features else default_expected_features lowerCAmelCase_ = ( Features({feature: Value(_A ) for 
feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase_ = JsonDatasetReader({'''train''': jsonl_path} , features=_A , cache_dir=_A ).read() _check_json_datasetdict(_A , _A ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __UpperCamelCase ( _A , _A , _A ): if split: lowerCAmelCase_ = {split: jsonl_path} else: lowerCAmelCase_ = '''train''' lowerCAmelCase_ = {'''train''': jsonl_path, '''test''': jsonl_path} lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A ).read() _check_json_datasetdict(_A , _A , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __UpperCamelCase ( _A ): return json.load(_A ) def __UpperCamelCase ( _A ): return [json.loads(_A ) for line in buffer] class A : @pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)] ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__ ).write() buffer.seek(0 ) lowerCAmelCase_ = load_json_function(UpperCamelCase__ ) assert isinstance(UpperCamelCase__, UpperCamelCase__ ) assert isinstance(exported_content[0], UpperCamelCase__ ) assert len(UpperCamelCase__ ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''', [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ], ) def 
SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__, orient=UpperCamelCase__ ).write() buffer.seek(0 ) lowerCAmelCase_ = load_json(UpperCamelCase__ ) assert isinstance(UpperCamelCase__, UpperCamelCase__ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase__, '''keys''' ) and not hasattr(exported_content[0], '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase__ ) == 10 @pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)] ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__, num_proc=2 ).write() buffer.seek(0 ) lowerCAmelCase_ = load_json_function(UpperCamelCase__ ) assert isinstance(UpperCamelCase__, UpperCamelCase__ ) assert isinstance(exported_content[0], UpperCamelCase__ ) assert len(UpperCamelCase__ ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''', [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ], ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, 
lines=UpperCamelCase__, orient=UpperCamelCase__, num_proc=2 ).write() buffer.seek(0 ) lowerCAmelCase_ = load_json(UpperCamelCase__ ) assert isinstance(UpperCamelCase__, UpperCamelCase__ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase__, '''keys''' ) and not hasattr(exported_content[0], '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase__ ) == 10 def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" with pytest.raises(UpperCamelCase__ ): with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, num_proc=0 ) @pytest.mark.parametrize('''compression, extension''', [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = tmp_path_factory.mktemp('''data''' ) / f"test.json.{extension}" lowerCAmelCase_ = str(shared_datadir / f"test_file.json.{extension}" ) JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, compression=UpperCamelCase__ ).write() with fsspec.open(UpperCamelCase__, '''rb''', compression='''infer''' ) as f: lowerCAmelCase_ = f.read() with fsspec.open(UpperCamelCase__, '''rb''', compression='''infer''' ) as f: lowerCAmelCase_ = f.read() assert exported_content == original_content
278
0
import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: List[str]=1 ) -> str: '''simple docstring''' if n_shave_prefix_segments >= 0: return ".".join(path.split("." )[n_shave_prefix_segments:] ) else: return ".".join(path.split("." )[:n_shave_prefix_segments] ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] , SCREAMING_SNAKE_CASE_: Any=0 ) -> Dict: '''simple docstring''' A__ = [] for old_item in old_list: A__ = old_item.replace("in_layers.0" , "norm1" ) A__ = new_item.replace("in_layers.2" , "conv1" ) A__ = new_item.replace("out_layers.0" , "norm2" ) A__ = new_item.replace("out_layers.3" , "conv2" ) A__ = new_item.replace("emb_layers.1" , "time_emb_proj" ) A__ = new_item.replace("skip_connection" , "conv_shortcut" ) A__ = shave_segments(SCREAMING_SNAKE_CASE_ , n_shave_prefix_segments=SCREAMING_SNAKE_CASE_ ) mapping.append({"old": old_item, "new": new_item} ) return mapping def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] , SCREAMING_SNAKE_CASE_: Tuple=0 ) -> Dict: '''simple docstring''' A__ = [] for old_item in old_list: A__ = old_item A__ = new_item.replace("norm.weight" , "group_norm.weight" ) A__ = new_item.replace("norm.bias" , "group_norm.bias" ) A__ = new_item.replace("proj_out.weight" , "proj_attn.weight" ) A__ = new_item.replace("proj_out.bias" , "proj_attn.bias" ) A__ = shave_segments(SCREAMING_SNAKE_CASE_ , n_shave_prefix_segments=SCREAMING_SNAKE_CASE_ ) mapping.append({"old": old_item, "new": new_item} ) return mapping def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: int=None , SCREAMING_SNAKE_CASE_: Tuple=None , SCREAMING_SNAKE_CASE_: List[str]=None ) -> int: '''simple docstring''' assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), "Paths should be a list of 
dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): A__ = old_checkpoint[path] A__ = old_tensor.shape[0] // 3 A__ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) A__ = old_tensor.shape[0] // config["num_head_channels"] // 3 A__ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) A__ , A__ , A__ = old_tensor.split(channels // num_heads , dim=1 ) A__ = query.reshape(SCREAMING_SNAKE_CASE_ ) A__ = key.reshape(SCREAMING_SNAKE_CASE_ ) A__ = value.reshape(SCREAMING_SNAKE_CASE_ ) for path in paths: A__ = path["new"] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here A__ = new_path.replace("middle_block.0" , "mid_block.resnets.0" ) A__ = new_path.replace("middle_block.1" , "mid_block.attentions.0" ) A__ = new_path.replace("middle_block.2" , "mid_block.resnets.1" ) if additional_replacements is not None: for replacement in additional_replacements: A__ = new_path.replace(replacement["old"] , replacement["new"] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: A__ = old_checkpoint[path["old"]][:, :, 0] else: A__ = old_checkpoint[path["old"]] def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Tuple , SCREAMING_SNAKE_CASE_: str ) -> Union[str, Any]: '''simple docstring''' A__ = {} A__ = checkpoint["time_embed.0.weight"] A__ = checkpoint["time_embed.0.bias"] A__ = checkpoint["time_embed.2.weight"] A__ = checkpoint["time_embed.2.bias"] A__ = checkpoint["input_blocks.0.0.weight"] A__ = checkpoint["input_blocks.0.0.bias"] A__ = checkpoint["out.0.weight"] A__ = checkpoint["out.0.bias"] A__ = checkpoint["out.2.weight"] A__ = checkpoint["out.2.bias"] # Retrieves the keys for the input blocks only A__ = len({".".join(layer.split("." 
)[:2] ) for layer in checkpoint if "input_blocks" in layer} ) A__ = { layer_id: [key for key in checkpoint if F'input_blocks.{layer_id}' in key] for layer_id in range(SCREAMING_SNAKE_CASE_ ) } # Retrieves the keys for the middle blocks only A__ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} ) A__ = { layer_id: [key for key in checkpoint if F'middle_block.{layer_id}' in key] for layer_id in range(SCREAMING_SNAKE_CASE_ ) } # Retrieves the keys for the output blocks only A__ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} ) A__ = { layer_id: [key for key in checkpoint if F'output_blocks.{layer_id}' in key] for layer_id in range(SCREAMING_SNAKE_CASE_ ) } for i in range(1 , SCREAMING_SNAKE_CASE_ ): A__ = (i - 1) // (config["num_res_blocks"] + 1) A__ = (i - 1) % (config["num_res_blocks"] + 1) A__ = [key for key in input_blocks[i] if F'input_blocks.{i}.0' in key] A__ = [key for key in input_blocks[i] if F'input_blocks.{i}.1' in key] if F'input_blocks.{i}.0.op.weight' in checkpoint: A__ = checkpoint[ F'input_blocks.{i}.0.op.weight' ] A__ = checkpoint[ F'input_blocks.{i}.0.op.bias' ] continue A__ = renew_resnet_paths(SCREAMING_SNAKE_CASE_ ) A__ = {"old": F'input_blocks.{i}.0', "new": F'down_blocks.{block_id}.resnets.{layer_in_block_id}'} A__ = {"old": "resnets.2.op", "new": "downsamplers.0.op"} assign_to_checkpoint( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path, resnet_op] , config=SCREAMING_SNAKE_CASE_ ) if len(SCREAMING_SNAKE_CASE_ ): A__ = renew_attention_paths(SCREAMING_SNAKE_CASE_ ) A__ = { "old": F'input_blocks.{i}.1', "new": F'down_blocks.{block_id}.attentions.{layer_in_block_id}', } A__ = { F'input_blocks.{i}.1.qkv.bias': { "key": F'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', "query": F'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', "value": 
F'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, F'input_blocks.{i}.1.qkv.weight': { "key": F'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', "query": F'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', "value": F'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , attention_paths_to_split=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ , ) A__ = middle_blocks[0] A__ = middle_blocks[1] A__ = middle_blocks[2] A__ = renew_resnet_paths(SCREAMING_SNAKE_CASE_ ) assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ ) A__ = renew_resnet_paths(SCREAMING_SNAKE_CASE_ ) assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ ) A__ = renew_attention_paths(SCREAMING_SNAKE_CASE_ ) A__ = { "middle_block.1.qkv.bias": { "key": "mid_block.attentions.0.key.bias", "query": "mid_block.attentions.0.query.bias", "value": "mid_block.attentions.0.value.bias", }, "middle_block.1.qkv.weight": { "key": "mid_block.attentions.0.key.weight", "query": "mid_block.attentions.0.query.weight", "value": "mid_block.attentions.0.value.weight", }, } assign_to_checkpoint( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , attention_paths_to_split=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ ) for i in range(SCREAMING_SNAKE_CASE_ ): A__ = i // (config["num_res_blocks"] + 1) A__ = i % (config["num_res_blocks"] + 1) A__ = [shave_segments(SCREAMING_SNAKE_CASE_ , 2 ) for name in output_blocks[i]] A__ = {} for layer in output_block_layers: A__ , A__ = layer.split("." 
)[0], shave_segments(SCREAMING_SNAKE_CASE_ , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(SCREAMING_SNAKE_CASE_ ) else: A__ = [layer_name] if len(SCREAMING_SNAKE_CASE_ ) > 1: A__ = [key for key in output_blocks[i] if F'output_blocks.{i}.0' in key] A__ = [key for key in output_blocks[i] if F'output_blocks.{i}.1' in key] A__ = renew_resnet_paths(SCREAMING_SNAKE_CASE_ ) A__ = renew_resnet_paths(SCREAMING_SNAKE_CASE_ ) A__ = {"old": F'output_blocks.{i}.0', "new": F'up_blocks.{block_id}.resnets.{layer_in_block_id}'} assign_to_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , config=SCREAMING_SNAKE_CASE_ ) if ["conv.weight", "conv.bias"] in output_block_list.values(): A__ = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] ) A__ = checkpoint[ F'output_blocks.{i}.{index}.conv.weight' ] A__ = checkpoint[ F'output_blocks.{i}.{index}.conv.bias' ] # Clear attentions as they have been attributed above. 
if len(SCREAMING_SNAKE_CASE_ ) == 2: A__ = [] if len(SCREAMING_SNAKE_CASE_ ): A__ = renew_attention_paths(SCREAMING_SNAKE_CASE_ ) A__ = { "old": F'output_blocks.{i}.1', "new": F'up_blocks.{block_id}.attentions.{layer_in_block_id}', } A__ = { F'output_blocks.{i}.1.qkv.bias': { "key": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', "query": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', "value": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, F'output_blocks.{i}.1.qkv.weight': { "key": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', "query": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', "value": F'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None , config=SCREAMING_SNAKE_CASE_ , ) else: A__ = renew_resnet_paths(SCREAMING_SNAKE_CASE_ , n_shave_prefix_segments=1 ) for path in resnet_0_paths: A__ = ".".join(["output_blocks", str(SCREAMING_SNAKE_CASE_ ), path["old"]] ) A__ = ".".join(["up_blocks", str(SCREAMING_SNAKE_CASE_ ), "resnets", str(SCREAMING_SNAKE_CASE_ ), path["new"]] ) A__ = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the architecture.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = torch.load(args.checkpoint_path) with open(args.config_file) as f: lowerCAmelCase__ = 
json.loads(f.read()) lowerCAmelCase__ = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] lowerCAmelCase__ = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: lowerCAmelCase__ = DDPMScheduler.from_config("""/""".join(args.checkpoint_path.split("""/""")[:-1])) lowerCAmelCase__ = VQModel.from_pretrained("""/""".join(args.checkpoint_path.split("""/""")[:-1])) lowerCAmelCase__ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
68
"""Scheduler plumbing: the config-file name constant, a scheduler-type enum,
a scheduler-output dataclass, and a mixin providing config save/load plus
compatible-class discovery.

NOTE(review): identifiers in this chunk were mangled by obfuscation
(`A`, `__snake_case`, `lowerCAmelCase_`, ...); several names referenced below
(`SCHEDULER_CONFIG_NAME`, `compatible_classes_str`, `compatible_classes`,
`__UpperCAmelCase`) are not defined in this chunk — confirm against the
original file before relying on runtime behavior.
"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union

import torch

from ..utils import BaseOutput


# Filename under which a scheduler serializes its configuration.
_A = '''scheduler_config.json'''


class A ( __UpperCAmelCase ):
    # Enumerated scheduler kinds, numbered 1..14.
    # NOTE(review): obfuscation collapsed every member to the same name
    # `__snake_case`; in a real Enum each line would declare a distinct member.
    __snake_case = 1
    __snake_case = 2
    __snake_case = 3
    __snake_case = 4
    __snake_case = 5
    __snake_case = 6
    __snake_case = 7
    __snake_case = 8
    __snake_case = 9
    __snake_case = 10
    __snake_case = 11
    __snake_case = 12
    __snake_case = 13
    __snake_case = 14


@dataclass
class A ( __UpperCAmelCase ):
    # Output container for a scheduler step.
    # NOTE(review): `42` is the obfuscated stand-in for a typed field
    # declaration — presumably a tensor field; verify in the original file.
    __snake_case = 42


class A :
    # Mixin giving schedulers config-based construction and persistence.
    __snake_case = SCHEDULER_CONFIG_NAME
    __snake_case = []
    __snake_case = True

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__=False, **UpperCamelCase__, ):
        """Load a scheduler configuration from a pretrained location and
        instantiate the scheduler from it.

        Delegates to ``cls.load_config`` (which also returns unused kwargs and
        the commit hash) and then to ``cls.from_config``.
        """
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = cls.load_config(
            pretrained_model_name_or_path=UpperCamelCase__,
            subfolder=UpperCamelCase__,
            return_unused_kwargs=UpperCamelCase__,
            return_commit_hash=UpperCamelCase__,
            **UpperCamelCase__,
        )
        return cls.from_config(UpperCamelCase__, return_unused_kwargs=UpperCamelCase__, **UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = False, **UpperCamelCase__ ):
        """Persist this scheduler's configuration to a directory (optionally
        pushing to the hub) by delegating to ``self.save_config``."""
        self.save_config(save_directory=UpperCamelCase__, push_to_hub=UpperCamelCase__, **UpperCamelCase__ )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Return the list of scheduler classes compatible with this one."""
        return self._get_compatibles()

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls ):
        """Resolve compatible scheduler class names to actual classes exported
        by the package's root module.

        NOTE(review): ``list(set(...))`` makes the returned order
        non-deterministic across runs; the free names
        ``compatible_classes_str`` / ``compatible_classes`` are obfuscation
        artifacts — in the original they are bound by the preceding
        assignments.
        """
        lowerCAmelCase_ = list(set([cls.__name__] + cls._compatibles ) )
        # Import the package root (e.g. ``diffusers``) to look names up on it.
        lowerCAmelCase_ = importlib.import_module(__name__.split('''.''' )[0] )
        lowerCAmelCase_ = [
            getattr(UpperCamelCase__, UpperCamelCase__ ) for c in compatible_classes_str if hasattr(UpperCamelCase__, UpperCamelCase__ )
        ]
        return compatible_classes
278
0
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list[str]: return [sentence[i : i + ngram_size] for i in range(len(UpperCAmelCase ) - ngram_size + 1 )] if __name__ == "__main__": from doctest import testmod testmod()
69
"""Unit tests for the KarrasVe pipeline: a fast CPU smoke test on a tiny
random UNet, and a @slow integration test against a pretrained checkpoint.

NOTE(review): local bindings in this chunk were mangled by obfuscation
(`lowerCAmelCase_`, `UpperCamelCase__`); names read below (`model`, `pipe`,
`image`, `image_slice`, `expected_slice`, ...) are the intended originals —
confirm against the unobfuscated file.
"""
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


# Pin all nondeterministic torch ops so slice comparisons are reproducible.
enable_full_determinism()


class A ( unittest.TestCase ):
    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Build a tiny, seeded unconditional UNet for fast CPU tests."""
        torch.manual_seed(0 )
        lowerCAmelCase_ = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''),
            up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''),
        )
        return model

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Smoke-test inference: the dict-style and tuple-style pipeline
        outputs must match each other and a pinned 3x3 corner slice."""
        lowerCAmelCase_ = self.dummy_uncond_unet
        lowerCAmelCase_ = KarrasVeScheduler()
        lowerCAmelCase_ = KarrasVePipeline(unet=UpperCamelCase__, scheduler=UpperCamelCase__ )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        # Same seed for both calls so the two output paths are comparable.
        lowerCAmelCase_ = torch.manual_seed(0 )
        lowerCAmelCase_ = pipe(num_inference_steps=2, generator=UpperCamelCase__, output_type='''numpy''' ).images
        lowerCAmelCase_ = torch.manual_seed(0 )
        lowerCAmelCase_ = pipe(num_inference_steps=2, generator=UpperCamelCase__, output_type='''numpy''', return_dict=UpperCamelCase__ )[0]
        # Compare only the bottom-right 3x3 patch of the last channel.
        lowerCAmelCase_ = image[0, -3:, -3:, -1]
        lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        lowerCAmelCase_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2


@slow
@require_torch
class A ( unittest.TestCase ):
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Integration test: run 20 inference steps with the pretrained
        google/ncsnpp-celebahq-256 UNet and check a pinned output slice."""
        lowerCAmelCase_ = '''google/ncsnpp-celebahq-256'''
        lowerCAmelCase_ = UNetaDModel.from_pretrained(UpperCamelCase__ )
        lowerCAmelCase_ = KarrasVeScheduler()
        lowerCAmelCase_ = KarrasVePipeline(unet=UpperCamelCase__, scheduler=UpperCamelCase__ )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        lowerCAmelCase_ = torch.manual_seed(0 )
        lowerCAmelCase_ = pipe(num_inference_steps=20, generator=UpperCamelCase__, output_type='''numpy''' ).images
        lowerCAmelCase_ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        lowerCAmelCase_ = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
278
0
"""BART model configuration and its ONNX export configuration.

NOTE(review): identifiers were mangled by obfuscation — every assignment
target is `_lowerCAmelCase` and every argument is `__snake_case`, so names
read later (`vocab_size`, `common_inputs`, `batch`, ...) are unbound in this
chunk as written. The structure and literals below are preserved verbatim;
restore bindings against the original file before executing.
"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


A__ : Tuple = logging.get_logger(__name__)

A__ : List[Any] = {
    '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
    # See all BART models at https://huggingface.co/models?filter=bart
}


class UpperCAmelCase ( snake_case_ ):
    # Model-type tag, keys serialized in `past_key_values`, and the mapping
    # from generic config attribute names to BART-specific ones.
    _lowercase: Any = '''bart'''
    _lowercase: Union[str, Any] = ['''past_key_values''']
    _lowercase: str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__( self : Tuple , __snake_case : List[str]=5_02_65 , __snake_case : List[str]=10_24 , __snake_case : Union[str, Any]=12 , __snake_case : Optional[Any]=40_96 , __snake_case : Tuple=16 , __snake_case : Optional[Any]=12 , __snake_case : List[Any]=40_96 , __snake_case : Dict=16 , __snake_case : Any=0.0 , __snake_case : List[str]=0.0 , __snake_case : Union[str, Any]="gelu" , __snake_case : str=10_24 , __snake_case : int=0.1 , __snake_case : int=0.0 , __snake_case : List[str]=0.0 , __snake_case : Union[str, Any]=0.02 , __snake_case : List[Any]=0.0 , __snake_case : Optional[int]=False , __snake_case : List[str]=True , __snake_case : Any=3 , __snake_case : List[Any]=1 , __snake_case : List[str]=0 , __snake_case : Optional[Any]=2 , __snake_case : int=True , __snake_case : List[str]=2 , __snake_case : Optional[int]=2 , **__snake_case : Union[str, Any] , ) -> Optional[Any]:
        """Store BART hyperparameters (vocab/dims/layer counts/dropouts/token
        ids) on the config and forward token-id defaults to the base class."""
        _lowerCAmelCase = vocab_size
        _lowerCAmelCase = max_position_embeddings
        _lowerCAmelCase = d_model
        _lowerCAmelCase = encoder_ffn_dim
        _lowerCAmelCase = encoder_layers
        _lowerCAmelCase = encoder_attention_heads
        _lowerCAmelCase = decoder_ffn_dim
        _lowerCAmelCase = decoder_layers
        _lowerCAmelCase = decoder_attention_heads
        _lowerCAmelCase = dropout
        _lowerCAmelCase = attention_dropout
        _lowerCAmelCase = activation_dropout
        _lowerCAmelCase = activation_function
        _lowerCAmelCase = init_std
        _lowerCAmelCase = encoder_layerdrop
        _lowerCAmelCase = decoder_layerdrop
        _lowerCAmelCase = classifier_dropout
        _lowerCAmelCase = use_cache
        _lowerCAmelCase = encoder_layers
        _lowerCAmelCase = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=__snake_case ,
            pad_token_id=__snake_case ,
            bos_token_id=__snake_case ,
            eos_token_id=__snake_case ,
            is_encoder_decoder=__snake_case ,
            decoder_start_token_id=__snake_case ,
            forced_eos_token_id=__snake_case ,
            **__snake_case ,
        )
        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , __snake_case ):
            _lowerCAmelCase = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                """The config can simply be saved and uploaded again to be fixed."""
            )


class UpperCAmelCase ( snake_case_ ):
    # ONNX export configuration for BART; shapes dynamic axes per task.
    @property
    def lowercase__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
        """Declare the model's ONNX input names and their dynamic axes,
        varying by task (seq2seq, causal-lm, or classification/QA)."""
        if self.task in ["default", "seq2seq-lm"]:
            _lowerCAmelCase = OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
                ]
            )
            if self.use_past:
                # With a KV cache the decoder sees one new token per step.
                _lowerCAmelCase = {0: """batch"""}
                _lowerCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
            else:
                _lowerCAmelCase = {0: """batch""", 1: """decoder_sequence"""}
                _lowerCAmelCase = {0: """batch""", 1: """decoder_sequence"""}
            if self.use_past:
                self.fill_with_past_key_values_(__snake_case , direction="""inputs""" )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            _lowerCAmelCase = OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
                ]
            )
            if self.use_past:
                _lowerCAmelCase , _lowerCAmelCase = self.num_layers
                for i in range(__snake_case ):
                    _lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
                    _lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
        else:
            _lowerCAmelCase = OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
                    ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
                ]
            )
        return common_inputs

    @property
    def lowercase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
        """Declare the ONNX output axes; for non-seq2seq tasks with a KV
        cache, present-state outputs get a dynamic past-sequence axis."""
        if self.task in ["default", "seq2seq-lm"]:
            _lowerCAmelCase = super().outputs
        else:
            _lowerCAmelCase = super(__snake_case , self ).outputs
            if self.use_past:
                _lowerCAmelCase , _lowerCAmelCase = self.num_layers
                for i in range(__snake_case ):
                    _lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
                    _lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
        return common_outputs

    def lowercase__ ( self : Optional[int] , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Build dummy encoder+decoder inputs (and zero-filled past_key_values
        when `use_past`) for tracing the seq2seq export."""
        _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
        # Generate decoder inputs
        _lowerCAmelCase = seq_length if not self.use_past else 1
        _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
        _lowerCAmelCase = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        _lowerCAmelCase = dict(**__snake_case , **__snake_case )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch

                _lowerCAmelCase , _lowerCAmelCase = common_inputs["""input_ids"""].shape
                _lowerCAmelCase = common_inputs["""decoder_input_ids"""].shape[1]
                _lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
                _lowerCAmelCase = (
                    batch,
                    num_encoder_attention_heads,
                    encoder_seq_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )
                _lowerCAmelCase = decoder_seq_length + 3
                _lowerCAmelCase = (
                    batch,
                    num_decoder_attention_heads,
                    decoder_past_length,
                    self._config.hidden_size // num_decoder_attention_heads,
                )
                # Extend the decoder mask to cover the padded past length.
                _lowerCAmelCase = torch.cat(
                    [common_inputs["""decoder_attention_mask"""], torch.ones(__snake_case , __snake_case )] , dim=1 )
                _lowerCAmelCase = []
                # If the number of encoder and decoder layers are present in the model configuration, both are considered
                _lowerCAmelCase , _lowerCAmelCase = self.num_layers
                _lowerCAmelCase = min(__snake_case , __snake_case )
                _lowerCAmelCase = max(__snake_case , __snake_case ) - min_num_layers
                _lowerCAmelCase = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
                for _ in range(__snake_case ):
                    common_inputs["past_key_values"].append(
                        (
                            torch.zeros(__snake_case ),
                            torch.zeros(__snake_case ),
                            torch.zeros(__snake_case ),
                            torch.zeros(__snake_case ),
                        )
                    )
                # TODO: test this.
                _lowerCAmelCase = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
                for _ in range(__snake_case , __snake_case ):
                    common_inputs["past_key_values"].append((torch.zeros(__snake_case ), torch.zeros(__snake_case )) )
        return common_inputs

    def lowercase__ ( self : Tuple , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Build dummy decoder-only inputs plus zero-filled past_key_values
        for tracing the causal-lm export."""
        _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch

                _lowerCAmelCase , _lowerCAmelCase = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                _lowerCAmelCase = seqlen + 2
                _lowerCAmelCase , _lowerCAmelCase = self.num_layers
                _lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
                _lowerCAmelCase = (
                    batch,
                    num_encoder_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )
                _lowerCAmelCase = common_inputs["""attention_mask"""].dtype
                _lowerCAmelCase = torch.cat(
                    [common_inputs["""attention_mask"""], torch.ones(__snake_case , __snake_case , dtype=__snake_case )] , dim=1 )
                _lowerCAmelCase = [
                    (torch.zeros(__snake_case ), torch.zeros(__snake_case )) for _ in range(__snake_case )
                ]
        return common_inputs

    def lowercase__ ( self : Any , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Tokenize dummy text into model inputs, fixing dynamic (-1)
        batch/sequence axes to concrete sizes the exporter can trace."""
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        _lowerCAmelCase = compute_effective_axis_dimension(
            __snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        _lowerCAmelCase = tokenizer.num_special_tokens_to_add(__snake_case )
        _lowerCAmelCase = compute_effective_axis_dimension(
            __snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__snake_case )
        # Generate dummy inputs according to compute batch and sequence
        _lowerCAmelCase = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
        _lowerCAmelCase = dict(tokenizer(__snake_case , return_tensors=__snake_case ) )
        return common_inputs

    def lowercase__ ( self : Dict , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation to the task-specific builder."""
        if self.task in ["default", "seq2seq-lm"]:
            _lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                __snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
        elif self.task == "causal-lm":
            _lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm(
                __snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
        else:
            _lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                __snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
        return common_inputs

    def lowercase__ ( self : Union[str, Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Optional[Any] , __snake_case : int ) -> List[Any]:
        """Flatten past_key_values using the seq2seq scheme for seq2seq tasks,
        otherwise the decoder-only (OnnxConfigWithPast) scheme."""
        if self.task in ["default", "seq2seq-lm"]:
            _lowerCAmelCase = super()._flatten_past_key_values_(__snake_case , __snake_case , __snake_case , __snake_case )
        else:
            _lowerCAmelCase = super(__snake_case , self )._flatten_past_key_values_(
                __snake_case , __snake_case , __snake_case , __snake_case )
70
# Package init for the UnCLIP pipelines: real implementations are exported
# only when torch and transformers>=4.25.0 are installed; otherwise the
# dummy placeholder objects (which raise a helpful error on use) are exported
# under the same names so imports never fail.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to dummies that raise on instantiation, keeping the API shape.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
278
0
"""Repo maintenance script: keep the auto-generated model lists inside the
task-guide docs in sync with the auto-model mappings in `transformers`.

NOTE(review): function and binding names were mangled by obfuscation (all
three functions are named `A`, locals are `__UpperCamelCase`); names read
below (`lines`, `start_index`, `task_guide`, `_find_text_in_file`, ...) are
the intended originals — confirm against the unobfuscated file.
"""
import argparse
import os

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
A_ :Tuple = '''src/transformers'''
A_ :str = '''docs/source/en/tasks'''


def A ( a_ ,a_ ,a_ ) -> Optional[int]:
    """Return (text, start_index, end_index, lines) for the region of the file
    between a start prompt line and an end prompt line, trimmed of blank
    lines at both edges."""
    with open(a_ ,'r' ,encoding='utf-8' ,newline='\n' ) as f:
        __UpperCamelCase : Optional[int] =f.readlines()
    # Find the start prompt.
    __UpperCamelCase : Optional[Any] =0
    while not lines[start_index].startswith(a_ ):
        start_index += 1
    start_index += 1
    __UpperCamelCase : Any =start_index
    while not lines[end_index].startswith(a_ ):
        end_index += 1
    end_index -= 1
    # Trim blank lines inward from both boundaries.
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
A_ :int = direct_transformers_import(TRANSFORMERS_PATH)

# Maps each task-guide doc file to the auto-model mapping backing its list.
A_ :Union[str, Any] = {
    '''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    '''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    '''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    '''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    '''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    '''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    '''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    '''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    '''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    '''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    '''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    '''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    '''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    '''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    '''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    '''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
A_ :Optional[Any] = {
    '''summarization.md''': ('''nllb''',),
    '''translation.md''': ('''nllb''',),
}


def A ( a_ ) -> Tuple:
    """Render the markdown bullet list of models supporting a task guide,
    as `[Name](../model_doc/code)` links joined by commas."""
    __UpperCamelCase : int =TASK_GUIDE_TO_MODELS[task_guide]
    __UpperCamelCase : Dict =SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(a_ ,set() )
    __UpperCamelCase : List[Any] ={
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F'[{name}](../model_doc/{code})' for code, name in model_names.items()] ) + "\n"


def A ( a_ ,a_=False ) -> Optional[int]:
    """Compare the generated model list in a task-guide doc with the current
    mapping; rewrite it when `overwrite` is set, otherwise raise."""
    __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Tuple =_find_text_in_file(
        filename=os.path.join(a_ ,a_ ) ,start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' ,end_prompt='<!--End of the generated tip-->' ,)
    __UpperCamelCase : Optional[Any] =get_model_list_for_task(a_ )
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(a_ ,a_ ) ,'w' ,encoding='utf-8' ,newline='\n' ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                F'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
                ' to fix this.'
            )


if __name__ == "__main__":
    A_ :List[Any] = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    A_ :int = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
71
"""Build and simulate a Quantum Fourier Transform circuit with Qiskit."""
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def __UpperCamelCase(number_of_qubits: int = 3):
    """Run the Quantum Fourier Transform on ``number_of_qubits`` qubits and
    return the measurement counts from a 10000-shot QASM simulation.

    Args:
        number_of_qubits: circuit width; must be an exact integer in 1..10.

    Returns:
        dict mapping measured bitstrings to their counts.

    Raises:
        TypeError: if ``number_of_qubits`` is a string.
        ValueError: if it is <= 0, not an exact integer, or > 10.
    """
    # Bug fixes vs. the original:
    #  * the type check was `isinstance(x, x)` — the value itself was passed
    #    as the type argument, so the call raised for every input instead of
    #    validating; restored the intended str check (floats fall through to
    #    the exact-integer check below).
    #  * the body referenced names (`number_of_qubits`, `counter`, `qr`, `cr`,
    #    `quantum_circuit`, `backend`) that obfuscation had left unbound,
    #    making every call a NameError; bindings are restored.
    if isinstance(number_of_qubits, str):
        raise TypeError('''number of qubits must be a integer.''')
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''')
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('''number of qubits must be exact integer.''')
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate(>10).''')

    qr = QuantumRegister(number_of_qubits, '''qr''')
    cr = ClassicalRegister(number_of_qubits, '''cr''')
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(counter):
        # Hadamard on the current most-significant remaining qubit, then
        # controlled-phase rotations of decreasing angle pi / 2**k.
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # Reverse qubit order with swaps to match the conventional QFT output.
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('''qasm_simulator''')
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
{__UpperCamelCase(3)}"
    )
278
0
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowerCAmelCase__ = 250004 lowerCAmelCase__ = 250020 @require_sentencepiece @require_tokenizers class __snake_case ( _lowercase , unittest.TestCase): snake_case__ : List[str] = MBartaaTokenizer snake_case__ : Tuple = MBartaaTokenizerFast snake_case__ : Any = True snake_case__ : Optional[Any] = True def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing _lowerCamelCase : str = MBartaaTokenizer(__lowerCAmelCase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=__lowerCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[Any] = '''<s>''' _lowerCamelCase : Tuple = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) , __lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(__lowerCAmelCase ) , 1_0_5_4 ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_0_5_4 ) def 
SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : str = MBartaaTokenizer(__lowerCAmelCase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(__lowerCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) _lowerCamelCase : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __lowerCAmelCase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , ) _lowerCamelCase : Union[str, Any] = tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) self.assertListEqual( __lowerCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] , ) _lowerCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(__lowerCAmelCase ) self.assertListEqual( __lowerCAmelCase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , ) @slow def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : Any = {'''input_ids''': [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 
8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCAmelCase , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return _lowerCamelCase : Dict = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _lowerCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) _lowerCamelCase : Dict = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) _lowerCamelCase : int = tempfile.mkdtemp() _lowerCamelCase : Union[str, Any] = tokenizer_r.save_pretrained(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = tokenizer_p.save_pretrained(__lowerCAmelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) _lowerCamelCase : List[str] = tuple(f for f 
in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase ) # Checks everything loads correctly in the same way _lowerCamelCase : int = tokenizer_r.from_pretrained(__lowerCAmelCase ) _lowerCamelCase : Tuple = tokenizer_p.from_pretrained(__lowerCAmelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(__lowerCAmelCase ) # Save tokenizer rust, legacy_format=True _lowerCamelCase : int = tempfile.mkdtemp() _lowerCamelCase : List[str] = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase ) _lowerCamelCase : Tuple = tokenizer_p.save_pretrained(__lowerCAmelCase ) # Checks it save with the same files self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase ) # Checks everything loads correctly in the same way _lowerCamelCase : List[Any] = tokenizer_r.from_pretrained(__lowerCAmelCase ) _lowerCamelCase : Any = tokenizer_p.from_pretrained(__lowerCAmelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) ) shutil.rmtree(__lowerCAmelCase ) # Save tokenizer rust, legacy_format=False _lowerCamelCase : Union[str, Any] = tempfile.mkdtemp() _lowerCamelCase : Optional[Any] = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = tokenizer_p.save_pretrained(__lowerCAmelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _lowerCamelCase : Dict = tokenizer_r.from_pretrained(__lowerCAmelCase ) 
_lowerCamelCase : Dict = tokenizer_p.from_pretrained(__lowerCAmelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) ) shutil.rmtree(__lowerCAmelCase ) @require_torch @require_sentencepiece @require_tokenizers class __snake_case ( unittest.TestCase): snake_case__ : int = "facebook/mbart-large-50-one-to-many-mmt" snake_case__ : Optional[int] = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] snake_case__ : Optional[Any] = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] snake_case__ : int = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2] @classmethod def SCREAMING_SNAKE_CASE ( cls : Dict ): """simple docstring""" _lowerCamelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' ) _lowerCamelCase : Any = 1 return cls def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 2_5_0_0_0_1 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 2_5_0_0_0_4 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 2_5_0_0_2_0 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 2_5_0_0_3_8 ) def SCREAMING_SNAKE_CASE 
( self : List[str] ): """simple docstring""" _lowerCamelCase : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" self.assertIn(__lowerCAmelCase , self.tokenizer.all_special_ids ) _lowerCamelCase : Optional[Any] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2] _lowerCamelCase : Optional[Any] = self.tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) _lowerCamelCase : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertNotIn(self.tokenizer.eos_token , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[Any] = ['''this is gunna be a long sentence ''' * 2_0] assert isinstance(src_text[0] , __lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = 1_0 _lowerCamelCase : Any = self.tokenizer(__lowerCAmelCase , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase ).input_ids[0] self.assertEqual(ids[0] , __lowerCAmelCase ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [2_5_0_0_5_3, 2_5_0_0_0_1] ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : str = tempfile.mkdtemp() _lowerCamelCase : Tuple = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(__lowerCAmelCase ) _lowerCamelCase : Any = MBartaaTokenizer.from_pretrained(__lowerCAmelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCAmelCase ) @require_torch def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : Tuple = 
self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowerCAmelCase , return_tensors='''pt''' ) _lowerCamelCase : Optional[Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Tuple = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) _lowerCamelCase : Any = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) self.assertEqual((2, 1_4) , batch.input_ids.shape ) self.assertEqual((2, 1_4) , batch.attention_mask.shape ) _lowerCamelCase : str = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , __lowerCAmelCase ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : List[Any] = self.tokenizer(self.src_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=3 , return_tensors='''pt''' ) _lowerCamelCase : Optional[Any] = self.tokenizer( text_target=self.tgt_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=1_0 , return_tensors='''pt''' ) _lowerCamelCase : List[Any] = targets['''input_ids'''] _lowerCamelCase : int = shift_tokens_right(__lowerCAmelCase , 
self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 ) @require_torch def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(__lowerCAmelCase ) , { # en_XX, A, test, EOS '''input_ids''': [[2_5_0_0_0_4, 6_2, 3_0_3_4, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 2_5_0_0_0_1, } , )
72
from functools import lru_cache


@lru_cache(maxsize=None)
def factorial(num: int) -> int:
    """Return ``num!`` (the factorial of *num*) using cached recursion.

    Args:
        num: A non-negative integer.

    Returns:
        The factorial of ``num``.

    Raises:
        ValueError: If ``num`` is negative.

    >>> factorial(6)
    720
    >>> factorial(0)
    1
    """
    if num < 0:
        raise ValueError("Number should not be negative.")
    # 0! == 1! == 1; otherwise recurse — intermediate results are memoized
    # by lru_cache, so repeated calls are O(1) after the first computation.
    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
278
0
import argparse

import requests
import torch
from PIL import Image

from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor


def rename_key(name):
    """Map an original MAE checkpoint parameter name to its HF Transformers name.

    NOTE: order matters — e.g. "decoder_blocks" must be handled before the
    generic "blocks" substitution, and "attn.proj" before "attn".
    """
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name


def convert_state_dict(orig_state_dict, config):
    """Convert an original MAE state dict in place to the HF naming scheme.

    Fused qkv projection matrices are split into separate query/key/value
    tensors; all other keys are renamed via :func:`rename_key`.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            # Fused weight is laid out as [query; key; value] along dim 0.
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Download an original ViT-MAE checkpoint, convert it and save the HF model.

    The model size is inferred from the checkpoint URL ("large"/"huge", else base).
    Verifies the converted model by checking a logits slice on a sample image.
    """
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass — seed fixed because MAE samples a random mask
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
73
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation


def get_upernet_config(model_name):
    """Build a UperNetConfig (Swin backbone + ADE20k labels) for *model_name*."""
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information (ADE20k has 150 classes)
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def create_rename_keys(config):
    """Return (old_key, new_key) pairs mapping mmseg names to HF names."""
    rename_keys = []
    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"))
        # the last stage has no downsample layer
        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))
    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    """Move the value stored under *old* to *new* in *dct* (in place)."""
    val = dct.pop(old)
    dct[new] = val


def read_in_q_k_v(state_dict, backbone_config):
    """Split each fused qkv projection into separate query/key/value tensors."""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim :, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :]
            # fmt: on


def correct_unfold_reduction_order(x):
    """Reorder a patch-merging reduction weight from mmseg to HF channel order."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    """Inverse of :func:`correct_unfold_reduction_order`."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    """Reorder a patch-merging norm parameter from mmseg to HF channel order."""
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    """Inverse of :func:`correct_unfold_norm_order`."""
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Convert an mmsegmentation Swin+UperNet checkpoint to a HF model.

    Verifies the converted model against known logits slices for a sample
    ADE20k image, then optionally saves it locally and/or pushes it to the hub.
    """
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
278
0
"""A pseudorandom number generator based on the linear congruential method."""

__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """Generates pseudorandom numbers via x' = (a * x + c) mod m.

    Attributes:
        multiplier: The multiplier ``a``.
        increment: The increment ``c``.
        modulo: The modulus ``m``; generated numbers are in ``[0, m)``.
        seed: The current state; updated on every call to :meth:`next_number`.
    """

    def __init__(self, multiplier, increment, modulo, seed=None):
        # Using None as the sentinel avoids the classic B008 pitfall of
        # evaluating int(time()) once at definition time, which would give
        # every default-seeded instance the same seed.
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = int(time()) if seed is None else seed

    def next_number(self):
        """Advance the generator state and return the next pseudorandom number."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
74
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER for *result*, print them and write them to disk.

    When ``--log_outputs`` is set, also writes per-example prediction and
    target files for later analysis.
    """
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """Lowercase *text* and strip punctuation/whitespace the model was not trained on."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    # NOTE(review): the exact whitespace runs below were garbled in the source;
    # taken from the upstream eval script — confirm against training config.
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    """Run ASR inference over the requested dataset split and log WER/CER."""
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio to the model's expected sampling rate
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
278
0
"""Check whether a non-negative integer is a perfect square."""

import math


def is_perfect_square(num: int) -> bool:
    """Return True if *num* is a perfect square.

    Uses ``math.isqrt`` so the check is exact for arbitrarily large ints,
    unlike float ``sqrt`` which loses precision past 2**53.

    Raises:
        ValueError: If ``num`` is negative.

    >>> is_perfect_square(9)
    True
    >>> is_perfect_square(10)
    False
    """
    return math.isqrt(num) ** 2 == num


def is_perfect_square_binary_search(n: int) -> bool:
    """Return True if *n* is a perfect square, via binary search in [0, n].

    >>> is_perfect_square_binary_search(16)
    True
    >>> is_perfect_square_binary_search(15)
    False
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        if mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
75
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _A = logging.get_logger(__name__) _A = { '''nielsr/canine-s''': 2_048, } # Unicode defines 1,114,112 total “codepoints” _A = 1_114_112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py _A = 0 _A = 0xe0_00 _A = 0xe0_01 _A = 0xe0_02 _A = 0xe0_03 _A = 0xe0_04 # Maps special codepoints to human-readable names. _A = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. 
# NOTE(review): this block is the CANINE character-level tokenizer from HF
# `transformers` (class mangled to ``A``). Identifiers are machine-mangled:
# every parameter of ``__init__`` is literally named ``UpperCamelCase__``
# (duplicate parameter names make the ``def`` line a SyntaxError, and the
# defaults ``chr(UpperCamelCase__)`` reference the parameter before it is
# bound), every assignment target is ``lowerCAmelCase_``, and later reads
# (``bos_token``, ``token_ids_a``, ``result``, ``token``, ``index``) refer to
# the pre-mangling names. Kept byte-identical and documented only -- a safe
# restoration needs the upstream source file, not a guess.
# Contract (from the visible structure): char tokenize = list(text);
# token->id = ord(); id->token = chr() with special-codepoint names mapped
# through SPECIAL_CODEPOINTS; sequences are wrapped as [CLS] A [SEP] (B [SEP]).
_A = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class A ( __UpperCAmelCase ): __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self, UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=False, UpperCamelCase__=2048, **UpperCamelCase__, ): """simple docstring""" lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else bos_token lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else eos_token lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else sep_token lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else cls_token lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else mask_token super().__init__( bos_token=UpperCamelCase__, eos_token=UpperCamelCase__, sep_token=UpperCamelCase__, cls_token=UpperCamelCase__, pad_token=UpperCamelCase__, mask_token=UpperCamelCase__, add_prefix_space=UpperCamelCase__, model_max_length=UpperCamelCase__, **UpperCamelCase__, ) # Creates a mapping for looking up the IDs of special symbols. 
lowerCAmelCase_ = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): lowerCAmelCase_ = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. lowerCAmelCase_ = { codepoint: name for name, codepoint in self._special_codepoints.items() } lowerCAmelCase_ = UNICODE_VOCAB_SIZE lowerCAmelCase_ = len(self._special_codepoints ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return self._unicode_vocab_size def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" return list(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" try: return ord(UpperCamelCase__ ) except TypeError: raise ValueError(f"invalid token: '{token}'" ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(UpperCamelCase__ ) except TypeError: raise ValueError(f"invalid id: {index}" ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" return "".join(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ): """simple docstring""" lowerCAmelCase_ = [self.sep_token_id] lowerCAmelCase_ = [self.cls_token_id] lowerCAmelCase_ = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None, UpperCamelCase__ = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase__, token_ids_a=UpperCamelCase__, already_has_special_tokens=UpperCamelCase__ ) lowerCAmelCase_ = [1] + ([0] * len(UpperCamelCase__ )) + [1] if token_ids_a is not None: result += ([0] * len(UpperCamelCase__ )) + [1] return result def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ): """simple docstring""" lowerCAmelCase_ = [self.sep_token_id] lowerCAmelCase_ = 
[self.cls_token_id] lowerCAmelCase_ = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ): """simple docstring""" return ()
278
0
def lowerCamelCase__(moles: float, volume: float, nfactor: float) -> int:
    """Molar concentration scaled by an n-factor, rounded to the nearest int.

    Restored parameter names: the mangled source declared all three
    parameters as ``_a`` (a SyntaxError) while each body read the names
    used below.
    """
    return round(float(moles / volume) * nfactor)


def lowerCamelCase__(moles: float, temperature: float, volume: float) -> int:  # noqa: F811
    """Ideal-gas pressure: P = nRT / V with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def lowerCamelCase__(moles: float, temperature: float, pressure: float) -> int:  # noqa: F811
    """Ideal-gas volume: V = nRT / P with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def lowerCamelCase__(pressure: float, volume: float, moles: float) -> int:  # noqa: F811
    """Ideal-gas temperature: T = PV / (nR) with R = 0.0821 L*atm/(mol*K).

    NOTE: each redefinition shadows the previous one, so only this variant
    is reachable at module level -- layout preserved from the original.
    """
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
76
def __UpperCamelCase ( _A = 1000000 ): lowerCAmelCase_ = 1 lowerCAmelCase_ = 1 lowerCAmelCase_ = {1: 1} for inputa in range(2 , _A ): lowerCAmelCase_ = 0 lowerCAmelCase_ = inputa while True: if number in counters: counter += counters[number] break if number % 2 == 0: number //= 2 counter += 1 else: lowerCAmelCase_ = (3 * number) + 1 counter += 1 if inputa not in counters: lowerCAmelCase_ = counter if counter > pre_counter: lowerCAmelCase_ = inputa lowerCAmelCase_ = counter return largest_number if __name__ == "__main__": print(solution(int(input().strip())))
278
0
"""simple docstring""" from timeit import timeit def a_ ( _lowerCAmelCase : int ): '''simple docstring''' if number < 0: raise ValueError('the value of input must not be negative' ) lowercase__ : Any = 0 while number: number &= number - 1 result += 1 return result def a_ ( _lowerCAmelCase : int ): '''simple docstring''' if number < 0: raise ValueError('the value of input must not be negative' ) lowercase__ : List[str] = 0 while number: if number % 2 == 1: result += 1 number >>= 1 return result def a_ ( ): '''simple docstring''' def do_benchmark(_lowerCAmelCase : int ) -> None: lowercase__ : List[str] = 'import __main__ as z' print(f"""Benchmark when {number = }:""" ) print(f"""{get_set_bits_count_using_modulo_operator(_lowerCAmelCase ) = }""" ) lowercase__ : Tuple = timeit('z.get_set_bits_count_using_modulo_operator(25)' , setup=_lowerCAmelCase ) print(f"""timeit() runs in {timing} seconds""" ) print(f"""{get_set_bits_count_using_brian_kernighans_algorithm(_lowerCAmelCase ) = }""" ) lowercase__ : List[Any] = timeit( 'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' , setup=_lowerCAmelCase , ) print(f"""timeit() runs in {timing} seconds""" ) for number in (25, 37, 58, 0): do_benchmark(_lowerCAmelCase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
77
# NOTE(review): slow integration test for the DiT document-image classifier
# ('microsoft/dit-base-finetuned-rvlcdip'): run one RVL-CDIP demo image
# through processor + model and compare logits shape (1, 16) and the first
# three logits against hard-coded expected values. Identifiers are
# machine-mangled: every local is assigned to ``lowerCAmelCase_`` while later
# statements read the original names (``model``, ``image_processor``,
# ``dataset``, ``inputs``, ``outputs``, ``logits``, ``expected_shape``,
# ``expected_slice``), and ``UpperCamelCase__`` stands in for what was
# presumably ``torch_device`` -- TODO confirm against upstream. Kept
# byte-identical: the test depends on downloaded weights and cannot be
# verified here.
import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class A ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' ) lowerCAmelCase_ = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' ) model.to(UpperCamelCase__ ) from datasets import load_dataset lowerCAmelCase_ = load_dataset('''nielsr/rvlcdip-demo''' ) lowerCAmelCase_ = dataset['''train'''][0]['''image'''].convert('''RGB''' ) lowerCAmelCase_ = image_processor(UpperCamelCase__, return_tensors='''pt''' ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCAmelCase_ = model(**UpperCamelCase__ ) lowerCAmelCase_ = outputs.logits lowerCAmelCase_ = torch.Size((1, 16) ) self.assertEqual(logits.shape, UpperCamelCase__ ) lowerCAmelCase_ = torch.tensor( [-0.4_158, -0.4_092, -0.4_347], device=UpperCamelCase__, dtype=torch.float, ) self.assertTrue(torch.allclose(logits[0, :3], UpperCamelCase__, atol=1E-4 ) )
278
0
"""simple docstring""" import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging snake_case_ = """\ """ snake_case_ = """ Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity """ snake_case_ = """ Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to 'cuda' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"] >>> results = perplexity.compute(model_id='gpt2', ... add_start_token=False, ... 
input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 78.22 >>> print(round(results[\"perplexities\"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = datasets.load_dataset(\"wikitext\", ... \"wikitext-2-raw-v1\", ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=''] >>> results = perplexity.compute(model_id='gpt2', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 60.35 >>> print(round(results[\"perplexities\"][0], 2)) 81.12 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A_ ( datasets.Metric ): """simple docstring""" def UpperCAmelCase__ ( self :str ) -> List[str]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'input_texts': datasets.Value('string' ), } ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , ) def UpperCAmelCase__ ( self :str , lowercase_ :Optional[int] , lowercase_ :Optional[Any] , lowercase_ :int = 16 , lowercase_ :bool = True , lowercase_ :Any=None ) -> Optional[int]: if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": UpperCAmelCase = 'cuda' else: UpperCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu' UpperCAmelCase = AutoModelForCausalLM.from_pretrained(lowercase_ ) UpperCAmelCase = model.to(lowercase_ ) UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: UpperCAmelCase = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(lowercase_ ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" UpperCAmelCase = model.config.max_length - 1 else: UpperCAmelCase = model.config.max_length UpperCAmelCase = tokenizer( lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , return_tensors='pt' , return_attention_mask=lowercase_ , ).to(lowercase_ ) UpperCAmelCase = encodings['input_ids'] UpperCAmelCase = encodings['attention_mask'] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. 
Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." UpperCAmelCase = [] UpperCAmelCase = CrossEntropyLoss(reduction='none' ) for start_index in logging.tqdm(range(0 , len(lowercase_ ) , lowercase_ ) ): UpperCAmelCase = min(start_index + batch_size , len(lowercase_ ) ) UpperCAmelCase = encoded_texts[start_index:end_index] UpperCAmelCase = attn_masks[start_index:end_index] if add_start_token: UpperCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(lowercase_ ) UpperCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) UpperCAmelCase = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(lowercase_ ), attn_mask] , dim=1 ) UpperCAmelCase = encoded_batch with torch.no_grad(): UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ ).logits UpperCAmelCase = out_logits[..., :-1, :].contiguous() UpperCAmelCase = labels[..., 1:].contiguous() UpperCAmelCase = attn_mask[..., 1:].contiguous() UpperCAmelCase = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , lowercase_ ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(lowercase_ )}
78
# NOTE(review): conversion script: T5X/Flax Pix2Struct checkpoint -> HF
# PyTorch (`PixaStruct*` is the mangled spelling of `Pix2Struct*`). Pipeline:
# load + flatten the T5X checkpoint, rename keys via the two mapping tables
# and layer-number regexes, transpose non-embedding weights into torch
# convention, load into a freshly built model, then save model + processor.
# Machine-mangled: locals are assigned to ``lowerCAmelCase_`` while reads use
# the original names (``converted_dict``, ``new_key``, ``encoder_config``,
# ...), so the script is not runnable as written. Also note the argparse flag
# is ``--t5x_checkpoint_path`` but the final call reads
# ``args.tax_checkpoint_path`` -- flagged here rather than fixed, since the
# whole block is mangled. Kept byte-identical and documented only.
import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def __UpperCamelCase ( _A ): lowerCAmelCase_ = checkpoints.load_tax_checkpoint(_A ) lowerCAmelCase_ = flatten_dict(_A ) return flax_params def __UpperCamelCase ( _A ): lowerCAmelCase_ = {} lowerCAmelCase_ = { '''token_embedder''': '''embeddings''', '''encoder_norm''': '''layernorm''', '''kernel''': '''weight''', '''.out''': '''.output''', '''scale''': '''weight''', '''embedders_0.pos_embedding''': '''row_embedder.weight''', '''embedders_1.pos_embedding''': '''column_embedder.weight''', } lowerCAmelCase_ = { '''query''': '''attention.query''', '''key''': '''attention.key''', '''value''': '''attention.value''', '''output.dense''': '''output''', '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''', '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''', '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''', '''mlp.''': '''mlp.DenseReluDense.''', '''pre_mlp_layer_norm''': '''mlp.layer_norm''', '''self_attention.o''': '''self_attention.attention.o''', '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''', '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''', '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''', } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key lowerCAmelCase_ = '''.'''.join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): lowerCAmelCase_ = new_key.replace(_A , _A ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): lowerCAmelCase_ 
= new_key.replace(_A , _A ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number lowerCAmelCase_ = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , _A ) lowerCAmelCase_ = new_key.replace('''encoder''' , '''encoder.encoder''' ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number lowerCAmelCase_ = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , _A ) lowerCAmelCase_ = flax_dict[key] lowerCAmelCase_ = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): lowerCAmelCase_ = torch.from_numpy(converted_dict[key].T ) else: lowerCAmelCase_ = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def __UpperCamelCase ( _A , _A , _A=False , _A=False ): lowerCAmelCase_ = get_flax_param(_A ) if not use_large: lowerCAmelCase_ = PixaStructVisionConfig() lowerCAmelCase_ = PixaStructTextConfig() else: lowerCAmelCase_ = PixaStructVisionConfig( hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 ) lowerCAmelCase_ = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 ) lowerCAmelCase_ = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=_A ) lowerCAmelCase_ = PixaStructForConditionalGeneration(_A ) lowerCAmelCase_ = rename_and_convert_flax_params(_A ) model.load_state_dict(_A ) lowerCAmelCase_ = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' ) lowerCAmelCase_ = PixaStructImageProcessor() lowerCAmelCase_ = PixaStructProcessor(image_processor=_A , tokenizer=_A ) if use_large: lowerCAmelCase_ = 4096 lowerCAmelCase_ = True # mkdir if needed os.makedirs(_A , exist_ok=_A ) model.save_pretrained(_A ) processor.save_pretrained(_A ) print('''Model saved in {}'''.format(_A ) ) if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument('''--t5x_checkpoint_path''', default=None, 
type=str, help='''Path to the original T5x checkpoint.''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''') parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''') _A = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
278
0
# NOTE(review): HF `datasets` combine module: interleave_datasets and
# concatenate_datasets, both mangled to ``__lowercase`` so the second
# definition shadows the first at module level. Each validates that the
# input list is non-empty, that every element is a Dataset or
# IterableDataset (rejecting DatasetDict / IterableDatasetDict with a
# pick-a-split hint), and that all elements are the same flavour, then
# dispatches to the map-style or iterable implementation. Machine-mangled:
# the tuple-unpack targets are ``_A , _A`` while the following comparisons
# read ``dataset_type`` / ``other_type`` (undefined as written). Kept
# byte-identical; all error strings are upstream text.
'''simple docstring''' from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = TypeVar('''DatasetType''', Dataset, IterableDataset) def __lowercase ( __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = "first_exhausted" , ) -> DatasetType: '''simple docstring''' from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError("Unable to interleave an empty list of datasets." ) for i, dataset in enumerate(__lowercase ): if not isinstance(__lowercase , (Dataset, IterableDataset) ): if isinstance(__lowercase , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ''' "is an empty dataset dictionary." ) raise ValueError( F'''Dataset at position {i} has at least one split: {list(__lowercase )}\n''' F'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(__lowercase ) )}\']''' ) raise ValueError( F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__lowercase ).__name__}.''' ) if i == 0: _A , _A = ( (Dataset, IterableDataset) if isinstance(__lowercase , __lowercase ) else (IterableDataset, Dataset) ) elif not isinstance(__lowercase , __lowercase ): raise ValueError( F'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). 
Expected a list of Dataset objects or a list of IterableDataset objects.''' ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F'''{stopping_strategy} is not supported. Please enter a valid stopping_strategy.''' ) if dataset_type is Dataset: return _interleave_map_style_datasets( __lowercase , __lowercase , __lowercase , info=__lowercase , split=__lowercase , stopping_strategy=__lowercase ) else: return _interleave_iterable_datasets( __lowercase , __lowercase , __lowercase , info=__lowercase , split=__lowercase , stopping_strategy=__lowercase ) def __lowercase ( __lowercase , __lowercase = None , __lowercase = None , __lowercase = 0 , ) -> DatasetType: '''simple docstring''' if not dsets: raise ValueError("Unable to concatenate an empty list of datasets." ) for i, dataset in enumerate(__lowercase ): if not isinstance(__lowercase , (Dataset, IterableDataset) ): if isinstance(__lowercase , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ''' "is an empty dataset dictionary." ) raise ValueError( F'''Dataset at position {i} has at least one split: {list(__lowercase )}\n''' F'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(__lowercase ) )}\']''' ) raise ValueError( F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__lowercase ).__name__}.''' ) if i == 0: _A , _A = ( (Dataset, IterableDataset) if isinstance(__lowercase , __lowercase ) else (IterableDataset, Dataset) ) elif not isinstance(__lowercase , __lowercase ): raise ValueError( F'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). 
Expected a list of Dataset objects or a list of IterableDataset objects.''' ) if dataset_type is Dataset: return _concatenate_map_style_datasets(__lowercase , info=__lowercase , split=__lowercase , axis=__lowercase ) else: return _concatenate_iterable_datasets(__lowercase , info=__lowercase , split=__lowercase , axis=__lowercase )
79
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


_A = logging.get_logger(__name__)


class A(BeitImageProcessor):
    """Deprecated drop-in alias for ``BeitImageProcessor``.

    Emits a ``FutureWarning`` on construction and otherwise behaves exactly
    like ``BeitImageProcessor``; scheduled for removal in Transformers v5.
    """

    def __init__(self, *args, **kwargs):
        # Restored from mangled code that (a) reused one name for both
        # *args and **kwargs (a SyntaxError), (b) passed an undefined name
        # as the warning category, and (c) subclassed an undefined name
        # although BeitImageProcessor is imported above and otherwise
        # unused -- the conventional HF deprecation shim is reinstated.
        warnings.warn(
            '''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use BeitImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
278
0
# NOTE(review): TF DeBERTa-v2 test-suite -- a model-tester helper class, the
# common-test mixin class (model / masked-LM / QA / sequence- and
# token-classification checks), and a slow integration test comparing a
# hidden-state slice of 'kamalkraj/deberta-v2-xlarge' against hard-coded
# values. Machine-mangled: classes are renamed to ``lowercase_`` (the second
# and third class statements shadow the earlier ones at module level),
# methods to ``__a``, and the helper's __init__ assigns every constructor
# argument to a local ``UpperCamelCase__`` instead of ``self.*`` attributes,
# so the later ``self.batch_size``/... reads cannot work as written. Kept
# byte-identical and documented only: behaviour depends on TF + pretrained
# weights and the mangling makes a faithful rewrite unverifiable here.
'''simple docstring''' from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class lowercase_ : def __init__( self , a , a=13 , a=7 , a=True , a=True , a=True , a=True , a=99 , a=32 , a=2 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=5_12 , a=16 , a=2 , a=0.02 , a=False , a=True , a="None" , a=3 , a=4 , a=None , ): UpperCamelCase__ = parent UpperCamelCase__ = batch_size UpperCamelCase__ = seq_length UpperCamelCase__ = is_training UpperCamelCase__ = use_input_mask UpperCamelCase__ = use_token_type_ids UpperCamelCase__ = use_labels UpperCamelCase__ = vocab_size UpperCamelCase__ = hidden_size UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = num_attention_heads UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_act UpperCamelCase__ = hidden_dropout_prob UpperCamelCase__ = attention_probs_dropout_prob UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = type_vocab_size UpperCamelCase__ = type_sequence_label_size UpperCamelCase__ = initializer_range UpperCamelCase__ = num_labels UpperCamelCase__ = num_choices UpperCamelCase__ = relative_attention UpperCamelCase__ = position_biased_input UpperCamelCase__ = pos_att_type UpperCamelCase__ = scope def __a ( self ): UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ = None if self.use_input_mask: UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase__ = None if self.use_token_type_ids: 
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = None if self.use_labels: UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ = DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=a , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self , a , a , a , a , a , a , a ): UpperCamelCase__ = TFDebertaVaModel(config=a ) UpperCamelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} UpperCamelCase__ = [input_ids, input_mask] UpperCamelCase__ = model(a ) UpperCamelCase__ = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self , a , a , a , a , a , a , a ): UpperCamelCase__ = TFDebertaVaForMaskedLM(config=a ) UpperCamelCase__ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCamelCase__ = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self , a , a , a , a , a , a , a ): UpperCamelCase__ = self.num_labels UpperCamelCase__ = TFDebertaVaForSequenceClassification(config=a ) UpperCamelCase__ = { 
"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCamelCase__ = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self , a , a , a , a , a , a , a ): UpperCamelCase__ = self.num_labels UpperCamelCase__ = TFDebertaVaForTokenClassification(config=a ) UpperCamelCase__ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCamelCase__ = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self , a , a , a , a , a , a , a ): UpperCamelCase__ = TFDebertaVaForQuestionAnswering(config=a ) UpperCamelCase__ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } UpperCamelCase__ = model(a ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self ): UpperCamelCase__ = self.prepare_config_and_inputs() ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) = config_and_inputs UpperCamelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class lowercase_ ( a__ , a__ , unittest.TestCase ): __UpperCAmelCase = ( ( TFDebertaVaModel, TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if is_tf_available() else () ) __UpperCAmelCase = ( { 'feature-extraction': TFDebertaVaModel, 'fill-mask': TFDebertaVaForMaskedLM, 'question-answering': TFDebertaVaForQuestionAnswering, 'text-classification': TFDebertaVaForSequenceClassification, 'token-classification': TFDebertaVaForTokenClassification, 'zero-shot': TFDebertaVaForSequenceClassification, } if 
is_tf_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False def __a ( self ): UpperCamelCase__ = TFDebertaVaModelTester(self ) UpperCamelCase__ = ConfigTester(self , config_class=a , hidden_size=37 ) def __a ( self ): self.config_tester.run_common_tests() def __a ( self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def __a ( self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*a ) def __a ( self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a ) def __a ( self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*a ) def __a ( self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*a ) @slow def __a ( self ): UpperCamelCase__ = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" ) self.assertIsNotNone(a ) @require_tf class lowercase_ ( unittest.TestCase ): @unittest.skip(reason="Model not available yet" ) def __a ( self ): pass @slow def __a ( self ): UpperCamelCase__ = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" ) UpperCamelCase__ = tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] ) UpperCamelCase__ = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) UpperCamelCase__ = model(a , attention_mask=a )[0] UpperCamelCase__ = tf.constant( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) tf.debugging.assert_near(output[:, 1:4, 1:4] , a , atol=1e-4 )
80
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() _A = logging.get_logger(__name__) def __UpperCamelCase ( _A ): lowerCAmelCase_ = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: lowerCAmelCase_ = 192 lowerCAmelCase_ = 768 lowerCAmelCase_ = 12 lowerCAmelCase_ = 3 lowerCAmelCase_ = [800, 1333] lowerCAmelCase_ = False elif yolos_name == "yolos_s_dWr": lowerCAmelCase_ = 330 lowerCAmelCase_ = 14 lowerCAmelCase_ = 6 lowerCAmelCase_ = 1320 elif "yolos_s" in yolos_name: lowerCAmelCase_ = 384 lowerCAmelCase_ = 1536 lowerCAmelCase_ = 12 lowerCAmelCase_ = 6 elif "yolos_b" in yolos_name: lowerCAmelCase_ = [800, 1344] lowerCAmelCase_ = 91 lowerCAmelCase_ = '''huggingface/label-files''' lowerCAmelCase_ = '''coco-detection-id2label.json''' lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()} lowerCAmelCase_ = idalabel lowerCAmelCase_ = {v: k for k, v in idalabel.items()} return config def __UpperCamelCase ( _A , _A , _A = False ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCAmelCase_ = state_dict.pop(f"blocks.{i}.attn.qkv.weight" ) lowerCAmelCase_ = state_dict.pop(f"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase_ = in_proj_weight[: config.hidden_size, :] lowerCAmelCase_ = in_proj_bias[: config.hidden_size] lowerCAmelCase_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCAmelCase_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCAmelCase_ = in_proj_weight[-config.hidden_size :, :] lowerCAmelCase_ = 
in_proj_bias[-config.hidden_size :] def __UpperCamelCase ( _A ): if "backbone" in name: lowerCAmelCase_ = name.replace('''backbone''' , '''vit''' ) if "cls_token" in name: lowerCAmelCase_ = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "det_token" in name: lowerCAmelCase_ = name.replace('''det_token''' , '''embeddings.detection_tokens''' ) if "mid_pos_embed" in name: lowerCAmelCase_ = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' ) if "pos_embed" in name: lowerCAmelCase_ = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: lowerCAmelCase_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "blocks" in name: lowerCAmelCase_ = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: lowerCAmelCase_ = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: lowerCAmelCase_ = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: lowerCAmelCase_ = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: lowerCAmelCase_ = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: lowerCAmelCase_ = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: lowerCAmelCase_ = name.replace('''mlp.fc2''' , '''output.dense''' ) if "class_embed" in name: lowerCAmelCase_ = name.replace('''class_embed''' , '''class_labels_classifier''' ) if "bbox_embed" in name: lowerCAmelCase_ = name.replace('''bbox_embed''' , '''bbox_predictor''' ) if "vit.norm" in name: lowerCAmelCase_ = name.replace('''vit.norm''' , '''vit.layernorm''' ) return name def __UpperCamelCase ( _A , _A ): for key in orig_state_dict.copy().keys(): lowerCAmelCase_ = orig_state_dict.pop(_A ) if "qkv" in key: lowerCAmelCase_ = key.split('''.''' ) lowerCAmelCase_ = int(key_split[2] ) lowerCAmelCase_ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: 
lowerCAmelCase_ = val[:dim, :] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[-dim:, :] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[dim : dim * 2] lowerCAmelCase_ = val[-dim:] else: lowerCAmelCase_ = val return orig_state_dict def __UpperCamelCase ( ): lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw ) return im @torch.no_grad() def __UpperCamelCase ( _A , _A , _A , _A = False ): lowerCAmelCase_ = get_yolos_config(_A ) # load original state_dict lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' )['''model'''] # load 🤗 model lowerCAmelCase_ = YolosForObjectDetection(_A ) model.eval() lowerCAmelCase_ = convert_state_dict(_A , _A ) model.load_state_dict(_A ) # Check outputs on an image, prepared by YolosImageProcessor lowerCAmelCase_ = 800 if yolos_name != '''yolos_ti''' else 512 lowerCAmelCase_ = YolosImageProcessor(format='''coco_detection''' , size=_A ) lowerCAmelCase_ = image_processor(images=prepare_img() , return_tensors='''pt''' ) lowerCAmelCase_ = model(**_A ) lowerCAmelCase_ , lowerCAmelCase_ = outputs.logits, outputs.pred_boxes lowerCAmelCase_ , lowerCAmelCase_ = None, None if yolos_name == "yolos_ti": lowerCAmelCase_ = torch.tensor( [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] ) lowerCAmelCase_ = torch.tensor( [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] ) elif yolos_name == "yolos_s_200_pre": lowerCAmelCase_ = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] ) lowerCAmelCase_ = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] ) elif yolos_name == "yolos_s_300_pre": lowerCAmelCase_ = torch.tensor( [[-3_6.2_2_2_0, 
-1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] ) lowerCAmelCase_ = torch.tensor( [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] ) elif yolos_name == "yolos_s_dWr": lowerCAmelCase_ = torch.tensor( [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] ) lowerCAmelCase_ = torch.tensor( [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] ) elif yolos_name == "yolos_base": lowerCAmelCase_ = torch.tensor( [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] ) lowerCAmelCase_ = torch.tensor( [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] ) else: raise ValueError(f"Unknown yolos_name: {yolos_name}" ) assert torch.allclose(logits[0, :3, :3] , _A , atol=1E-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , _A , atol=1E-4 ) Path(_A ).mkdir(exist_ok=_A ) print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_A ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(_A ) if push_to_hub: lowerCAmelCase_ = { '''yolos_ti''': '''yolos-tiny''', '''yolos_s_200_pre''': '''yolos-small''', '''yolos_s_300_pre''': '''yolos-small-300''', '''yolos_s_dWr''': '''yolos-small-dwr''', '''yolos_base''': '''yolos-base''', } print('''Pushing to the hub...''' ) lowerCAmelCase_ = model_mapping[yolos_name] image_processor.push_to_hub(_A , organization='''hustvl''' ) model.push_to_hub(_A , organization='''hustvl''' ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--yolos_name''', default='''yolos_s_200_pre''', type=str, help=( '''Name of the YOLOS model you\'d like to convert. 
Should be one of \'yolos_ti\', \'yolos_s_200_pre\',''' ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.''' ), ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _A = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
278
0
"""Pegasus model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCamelCase_ : str = logging.get_logger(__name__)

# NOTE(review): this rebinds the same module-level name used for the logger
# above; the logger is unused afterwards so behavior is unaffected, but the
# two bindings should ideally have distinct names.
lowerCamelCase_ = {
    """google/pegasus-large""": """https://huggingface.co/google/pegasus-large/resolve/main/config.json""",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class __A ( PretrainedConfig ):  # base restored: ``_SCREAMING_SNAKE_CASE`` was undefined
    """Configuration class for a PEGASUS encoder-decoder model.

    Stores the hyper-parameters (vocabulary size, layer counts, dropout
    rates, special token ids, ...) that instantiate a PEGASUS model.
    """

    # Canonical ``PretrainedConfig`` attribute names restored — the original
    # assigned all three to one mangled name, so only the last survived.
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=5_0265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ) -> None:
        # Parameter names restored: the original declared every parameter as
        # ``__A`` (a SyntaxError — duplicate argument names) while the body
        # read these identifiers; the defaults/order match the attribute
        # assignments below.  The original ``-> Union[str, Any]`` annotation
        # referenced an unimported name and was wrong for ``__init__``.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    # Property names restored from ``attribute_map``; the original defined
    # both under one name, so the first was shadowed.
    @property
    def num_attention_heads(self) -> int:
        """Alias for ``encoder_attention_heads`` (see ``attribute_map``)."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias for ``d_model`` (see ``attribute_map``)."""
        return self.d_model
81
def __UpperCamelCase ( _A ):
    """Return the maximum product over all contiguous subarrays of ``_A``.

    Uses the classic running max/min product technique: a negative factor
    turns the smallest running product into the largest (and vice versa),
    so both extremes are tracked and swapped when the current element is
    negative.

    Args:
        _A: a list or tuple of integers; may be empty.

    Returns:
        The maximum contiguous-subarray product, or ``0`` for empty input.

    Raises:
        ValueError: if ``_A`` is not a list/tuple of integers.
    """
    numbers = _A  # the original body read an undefined name ``numbers``
    if not numbers:
        return 0
    # Fix: the original called ``isinstance(_A, _A)`` — passing the container
    # as the *type* argument raises TypeError; the intent is per-element
    # validation.
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError('''numbers must be an iterable of integers''')

    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            # a negative factor flips which extreme becomes the largest
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod


# Exported explicitly: leading-underscore names are otherwise hidden from
# ``from module import *``.
__all__ = ["__UpperCamelCase"]
278
0
"""ALBERT model configuration and ONNX export configuration."""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


A__ = {
    """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
    """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
    """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
    """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
    """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
    """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
    """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
    """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}


# Class name restored: the original named *both* classes in this file
# ``__lowerCAmelCase``, so this config class was shadowed by the ONNX config
# below and unreachable by importers.
class AlbertConfig(PretrainedConfig):  # base restored: ``lowerCamelCase__`` was undefined
    """Configuration class storing the hyper-parameters of an ALBERT model."""

    # ``model_type`` is the attribute name ``PretrainedConfig`` machinery reads.
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        # Parameter names restored: the original declared every parameter as
        # ``_snake_case`` (duplicate-argument SyntaxError) while the body read
        # these names; the defaults/order match the assignments below.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # ``_lowerCAmelCase = vocab_size`` style statements never stored the
        # values on the instance; restored to attribute assignments.
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    """ONNX export configuration for ALBERT."""

    # ``inputs`` is the property name the ``OnnxConfig`` API expects; the
    # original's mangled name would never be consulted by the exporter.
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Map each model input to its dynamic (batch/choice/sequence) axes."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""token_type_ids""", dynamic_axis),
            ]
        )
82
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __UpperCamelCase ( _A ): lowerCAmelCase_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2] lowerCAmelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False lowerCAmelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False lowerCAmelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: lowerCAmelCase_ = [3, 3, 3, 3] lowerCAmelCase_ = [5, 5, 5, 5] elif "fl4" in model_name: lowerCAmelCase_ = [4, 4, 4, 4] lowerCAmelCase_ = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: lowerCAmelCase_ = [3, 3, 3, 3] if "lrf" in model_name: lowerCAmelCase_ = [3, 3, 3, 3] else: lowerCAmelCase_ = [2, 2, 2, 2] if "tiny" in model_name: lowerCAmelCase_ = 96 elif "small" in model_name: lowerCAmelCase_ = 96 elif "base" in model_name: lowerCAmelCase_ = 128 elif "large" in model_name: lowerCAmelCase_ = 192 elif "xlarge" in model_name: lowerCAmelCase_ = 256 elif "huge" in model_name: lowerCAmelCase_ = 352 # set label information lowerCAmelCase_ = '''huggingface/label-files''' if "large" in model_name or "huge" in model_name: lowerCAmelCase_ = '''imagenet-22k-id2label.json''' else: lowerCAmelCase_ = '''imagenet-1k-id2label.json''' lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()} lowerCAmelCase_ = {v: k for k, v in idalabel.items()} lowerCAmelCase_ = FocalNetConfig( embed_dim=_A , depths=_A , focal_levels=_A , 
focal_windows=_A , use_conv_embed=_A , idalabel=_A , labelaid=_A , use_post_layernorm=_A , use_layerscale=_A , ) return config def __UpperCamelCase ( _A ): if "patch_embed.proj" in name: lowerCAmelCase_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: lowerCAmelCase_ = name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: lowerCAmelCase_ = '''encoder.''' + name if "encoder.layers" in name: lowerCAmelCase_ = name.replace('''encoder.layers''' , '''encoder.stages''' ) if "downsample.proj" in name: lowerCAmelCase_ = name.replace('''downsample.proj''' , '''downsample.projection''' ) if "blocks" in name: lowerCAmelCase_ = name.replace('''blocks''' , '''layers''' ) if "modulation.f.weight" in name or "modulation.f.bias" in name: lowerCAmelCase_ = name.replace('''modulation.f''' , '''modulation.projection_in''' ) if "modulation.h.weight" in name or "modulation.h.bias" in name: lowerCAmelCase_ = name.replace('''modulation.h''' , '''modulation.projection_context''' ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: lowerCAmelCase_ = name.replace('''modulation.proj''' , '''modulation.projection_out''' ) if name == "norm.weight": lowerCAmelCase_ = '''layernorm.weight''' if name == "norm.bias": lowerCAmelCase_ = '''layernorm.bias''' if "head" in name: lowerCAmelCase_ = name.replace('''head''' , '''classifier''' ) else: lowerCAmelCase_ = '''focalnet.''' + name return name def __UpperCamelCase ( _A , _A , _A=False ): # fmt: off lowerCAmelCase_ = { '''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''', '''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''', '''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''', '''focalnet-small-lrf''': 
'''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''', '''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''', '''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''', '''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''', '''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''', '''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''', '''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''', } # fmt: on lowerCAmelCase_ = model_name_to_url[model_name] print('''Checkpoint URL: ''' , _A ) lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' )['''model'''] # rename keys for key in state_dict.copy().keys(): lowerCAmelCase_ = state_dict.pop(_A ) lowerCAmelCase_ = val lowerCAmelCase_ = get_focalnet_config(_A ) lowerCAmelCase_ = FocalNetForImageClassification(_A ) model.eval() # load state dict model.load_state_dict(_A ) # verify conversion lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase_ = BitImageProcessor( do_resize=_A , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=_A , crop_size=224 , do_normalize=_A , image_mean=_A , image_std=_A , ) lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw ) lowerCAmelCase_ = processor(images=_A , return_tensors='''pt''' ) lowerCAmelCase_ = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , 
std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) lowerCAmelCase_ = image_transforms(_A ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , _A , atol=1E-4 ) lowerCAmelCase_ = model(**_A ) lowerCAmelCase_ = outputs.logits.argmax(-1 ).item() print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] ) print('''First values of logits:''' , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": lowerCAmelCase_ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ) elif model_name == "focalnet-tiny-lrf": lowerCAmelCase_ = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] ) elif model_name == "focalnet-small": lowerCAmelCase_ = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] ) elif model_name == "focalnet-small-lrf": lowerCAmelCase_ = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] ) elif model_name == "focalnet-base": lowerCAmelCase_ = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] ) elif model_name == "focalnet-base-lrf": lowerCAmelCase_ = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] ) assert torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_A ) processor.save_pretrained(_A ) if push_to_hub: print(f"Pushing model and processor of {model_name} to the hub..." 
) model.push_to_hub(f"{model_name}" ) processor.push_to_hub(f"{model_name}" ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''focalnet-tiny''', type=str, help='''Name of the FocalNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub.''', ) _A = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
278
0
'''simple docstring''' from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowercase__ : lowercase__ = XGLMConfig lowercase__ = {} lowercase__ = """gelu""" def __init__( self : List[str] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Optional[Any]=14 ,lowerCamelCase__ : Optional[int]=7 ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : Any=True ,lowerCamelCase__ : Tuple=True ,lowerCamelCase__ : Union[str, Any]=99 ,lowerCamelCase__ : List[Any]=32 ,lowerCamelCase__ : int=2 ,lowerCamelCase__ : Tuple=4 ,lowerCamelCase__ : str=37 ,lowerCamelCase__ : List[Any]="gelu" ,lowerCamelCase__ : List[Any]=0.1 ,lowerCamelCase__ : Dict=0.1 ,lowerCamelCase__ : int=512 ,lowerCamelCase__ : List[str]=0.0_2 ,): '''simple docstring''' _UpperCamelCase : str = parent _UpperCamelCase : Optional[int] = batch_size _UpperCamelCase : str = seq_length _UpperCamelCase : Tuple = is_training _UpperCamelCase : Dict = use_input_mask _UpperCamelCase : str = use_labels _UpperCamelCase : Union[str, Any] = vocab_size _UpperCamelCase : Tuple = d_model _UpperCamelCase : Any = num_hidden_layers _UpperCamelCase : Union[str, Any] = num_attention_heads _UpperCamelCase : Optional[Any] = ffn_dim _UpperCamelCase : Optional[Any] = activation_function _UpperCamelCase : List[Any] = activation_dropout _UpperCamelCase : Union[str, Any] = attention_dropout _UpperCamelCase : Optional[int] = max_position_embeddings _UpperCamelCase : Tuple = initializer_range _UpperCamelCase : int 
= None _UpperCamelCase : Optional[int] = 0 _UpperCamelCase : Optional[int] = 2 _UpperCamelCase : int = 1 def UpperCamelCase_ ( self : str ): '''simple docstring''' return XGLMConfig.from_pretrained('facebook/xglm-564M' ) def UpperCamelCase_ ( self : int ): '''simple docstring''' _UpperCamelCase : List[Any] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) ,clip_value_min=0 ,clip_value_max=3 ) _UpperCamelCase : List[Any] = None if self.use_input_mask: _UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCamelCase : Union[str, Any] = self.get_config() _UpperCamelCase : str = floats_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 ) return ( config, input_ids, input_mask, head_mask, ) def UpperCamelCase_ ( self : Any ): '''simple docstring''' return XGLMConfig( vocab_size=self.vocab_size ,d_model=self.hidden_size ,num_layers=self.num_hidden_layers ,attention_heads=self.num_attention_heads ,ffn_dim=self.ffn_dim ,activation_function=self.activation_function ,activation_dropout=self.activation_dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,use_cache=lowerCamelCase__ ,bos_token_id=self.bos_token_id ,eos_token_id=self.eos_token_id ,pad_token_id=self.pad_token_id ,return_dict=lowerCamelCase__ ,) def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' _UpperCamelCase : List[Any] = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) : int = config_and_inputs _UpperCamelCase : str = { 'input_ids': input_ids, 'head_mask': head_mask, } return config, inputs_dict @require_tf class lowercase__ ( lowercase , lowercase , unittest.TestCase ): lowercase__ = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowercase__ = (TFXGLMForCausalLM,) if is_tf_available() else () lowercase__ = ( 
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' _UpperCamelCase : List[str] = TFXGLMModelTester(self ) _UpperCamelCase : str = ConfigTester(self ,config_class=lowerCamelCase__ ,n_embd=37 ) def UpperCamelCase_ ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() @slow def UpperCamelCase_ ( self : Dict ): '''simple docstring''' for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase : int = TFXGLMModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' ) def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' super().test_resize_token_embeddings() @require_tf class lowercase__ ( unittest.TestCase ): @slow def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : Optional[Any]=True ): '''simple docstring''' _UpperCamelCase : int = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) _UpperCamelCase : Optional[int] = tf.convert_to_tensor([[2, 268, 9865]] ,dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. 
He is very affectionate and loves to play with other # fmt: off _UpperCamelCase : Union[str, Any] = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581] # fmt: on _UpperCamelCase : Tuple = model.generate(lowerCamelCase__ ,do_sample=lowerCamelCase__ ,num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() ,lowerCamelCase__ ) @slow def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' _UpperCamelCase : Optional[int] = XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) _UpperCamelCase : int = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) tf.random.set_seed(0 ) _UpperCamelCase : List[str] = tokenizer('Today is a nice day and' ,return_tensors='tf' ) _UpperCamelCase : Any = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(':/CPU:0' ): _UpperCamelCase : Optional[int] = model.generate(lowerCamelCase__ ,do_sample=lowerCamelCase__ ,seed=[7, 0] ) _UpperCamelCase : Any = tokenizer.decode(output_ids[0] ,skip_special_tokens=lowerCamelCase__ ) _UpperCamelCase : List[Any] = ( 'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due' ) self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ ) @slow def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' _UpperCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) _UpperCamelCase : List[str] = XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) _UpperCamelCase : Optional[Any] = 'left' # use different length sentences to test batching _UpperCamelCase : List[str] = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. 
When', 'Hello, my dog is a little', ] _UpperCamelCase : Dict = tokenizer(lowerCamelCase__ ,return_tensors='tf' ,padding=lowerCamelCase__ ) _UpperCamelCase : List[Any] = inputs['input_ids'] _UpperCamelCase : int = model.generate(input_ids=lowerCamelCase__ ,attention_mask=inputs['attention_mask'] ,max_new_tokens=12 ) _UpperCamelCase : int = tokenizer(sentences[0] ,return_tensors='tf' ).input_ids _UpperCamelCase : List[str] = model.generate(input_ids=lowerCamelCase__ ,max_new_tokens=12 ) _UpperCamelCase : List[str] = tokenizer(sentences[1] ,return_tensors='tf' ).input_ids _UpperCamelCase : Union[str, Any] = model.generate(input_ids=lowerCamelCase__ ,max_new_tokens=12 ) _UpperCamelCase : List[str] = tokenizer.batch_decode(lowerCamelCase__ ,skip_special_tokens=lowerCamelCase__ ) _UpperCamelCase : Optional[int] = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=lowerCamelCase__ ) _UpperCamelCase : List[Any] = tokenizer.decode(output_padded[0] ,skip_special_tokens=lowerCamelCase__ ) _UpperCamelCase : Any = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be ' 'a single', 'Hello, my dog is a little bit of a shy one, but he is very friendly', ] self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ ,[non_padded_sentence, padded_sentence] )
83
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    """Drop fairseq-only bookkeeping entries from ``state_dict`` in place."""
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        # ``None`` default so absent keys are silently skipped.
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free Linear sharing the embedding's weights (tied LM head)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq XGLM checkpoint from ``checkpoint_path`` and return an
    equivalent ``XGLMForCausalLM``."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    # fairseq stores everything under "decoder.*"; transformers expects "model.*".
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    # ``strict=False``: the output projection was removed above and is rebuilt below.
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
278
0
"""simple docstring""" def _snake_case ( lowercase__ : int = 1_0_0_0 ) -> int: '''simple docstring''' lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = 1, 1 lowerCAmelCase_ :Dict = 2 while True: lowerCAmelCase_ :str = 0 lowerCAmelCase_ :str = fa + fa lowerCAmelCase_ , lowerCAmelCase_ :Tuple = fa, f index += 1 for _ in str(lowercase__ ): i += 1 if i == n: break return index if __name__ == "__main__": print(solution(int(str(input()).strip())))
84
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

import json
import tempfile
from pathlib import Path

from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES

mname_tiny = "tiny-wmt19-en-ru"

# Build
# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    # Tokenizer must be built while the temp vocab/merges files still exist.
    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1_000,
    tgt_vocab_size=1_000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
278
0
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    """Tests for ``DPMSolverSinglestepScheduler``."""

    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        """Default scheduler config, overridable via keyword arguments."""
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """A scheduler saved and reloaded must step identically to the original."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            # cover one full solver cycle starting at ``time_step``
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # covered by check_over_configs / check_over_forward round trips
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Forward kwargs must survive a save/load round trip as well."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        """Run a 10-step denoising loop and return the final sample.

        A caller-provided ``scheduler`` is used as-is (it is NOT rebuilt here,
        so ``test_switch`` can exercise scheduler hand-offs).
        """
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # Round-tripping the config through sibling schedulers must not change results.
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        # half-precision input must stay half-precision through the loop
        assert sample.dtype == torch.float16
85
import argparse
from collections import defaultdict

PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Deduplicate the model-doc entries and return them sorted by title.

    Args:
        model_doc: list of ``{"local": ..., "title": ...}`` dicts.

    Returns:
        The cleaned list, sorted case-insensitively by title.

    Raises:
        ValueError: if one ``local`` key appears with conflicting titles.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    """Check (and optionally fix) the model section of the doc table of content."""
    # Imported lazily so ``clean_model_doc_toc`` stays usable without PyYAML installed.
    import yaml

    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
278
0
"""simple docstring""" import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="resnet50" , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , ): __lowerCAmelCase : Optional[int] = parent __lowerCAmelCase : Optional[int] = out_indices if out_indices is not None else [4] __lowerCAmelCase : Optional[int] = stage_names __lowerCAmelCase : Optional[Any] = out_features __lowerCAmelCase : Any = backbone __lowerCAmelCase : List[Any] = batch_size __lowerCAmelCase : List[str] = image_size __lowerCAmelCase : str = num_channels __lowerCAmelCase : int = use_pretrained_backbone __lowerCAmelCase : Optional[Any] = is_training def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCAmelCase : str = self.get_config() return config, pixel_values def __lowerCamelCase ( self ): return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def __lowerCamelCase ( self , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[Any] = TimmBackbone(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def __lowerCamelCase ( self ): __lowerCAmelCase : int = self.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = config_and_inputs __lowerCAmelCase : Union[str, Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch @require_timm class A__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase): A_ : Tuple = (TimmBackbone,) if is_torch_available() else () A_ : Dict = {'feature-extraction': TimmBackbone} if is_torch_available() else {} A_ : Optional[int] = False A_ : List[str] = False A_ : str = False A_ : Dict = False def __lowerCamelCase ( self ): __lowerCAmelCase : int = TimmBackboneModelTester(self ) __lowerCAmelCase : List[str] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = 'resnet18' __lowerCAmelCase : Optional[int] = 'microsoft/resnet-18' __lowerCAmelCase : Optional[Any] = AutoBackbone.from_pretrained(_SCREAMING_SNAKE_CASE , use_timm_backbone=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = AutoBackbone.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) 
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) __lowerCAmelCase : int = AutoBackbone.from_pretrained(_SCREAMING_SNAKE_CASE , use_timm_backbone=_SCREAMING_SNAKE_CASE , out_indices=[1, 2, 3] ) __lowerCAmelCase : Optional[int] = AutoBackbone.from_pretrained(_SCREAMING_SNAKE_CASE , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip('TimmBackbone doesn\'t support feed forward chunking' ) def __lowerCamelCase ( self ): pass @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' ) def __lowerCamelCase ( self ): pass @unittest.skip('TimmBackbone initialization is managed on the timm side' ) def __lowerCamelCase ( self ): pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' ) def __lowerCamelCase ( self ): pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' ) def __lowerCamelCase ( self ): pass @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' ) def __lowerCamelCase ( self ): pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __lowerCamelCase ( self ): pass @unittest.skip('model weights aren\'t tied in TimmBackbone.' ) def __lowerCamelCase ( self ): pass @unittest.skip('model weights aren\'t tied in TimmBackbone.' 
) def __lowerCamelCase ( self ): pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __lowerCamelCase ( self ): pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __lowerCamelCase ( self ): pass @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' ) def __lowerCamelCase ( self ): pass @unittest.skip('TimmBackbone doesn\'t support output_attentions.' ) def __lowerCamelCase ( self ): pass @unittest.skip('Safetensors is not supported by timm.' ) def __lowerCamelCase ( self ): pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def __lowerCamelCase ( self ): pass def __lowerCamelCase ( self ): __lowerCAmelCase , __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase : Optional[Any] = model_class(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase : List[str] = [*signature.parameters.keys()] __lowerCAmelCase : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase , __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase : Any = True __lowerCAmelCase : Optional[Any] = self.has_attentions # no need to test all models as different heads yield the same functionality __lowerCAmelCase : Any = self.all_model_classes[0] __lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = model(**_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = outputs[0][-1] # Encoder-/Decoder-only models __lowerCAmelCase : Dict = 
outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: __lowerCAmelCase : Tuple = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def __lowerCamelCase ( self ): __lowerCAmelCase , __lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase : Optional[Any] = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : Optional[int] = model(**_SCREAMING_SNAKE_CASE ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None __lowerCAmelCase : Optional[int] = copy.deepcopy(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = None __lowerCAmelCase : int = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : int = model(**_SCREAMING_SNAKE_CASE ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights __lowerCAmelCase : List[Any] = copy.deepcopy(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = False __lowerCAmelCase : int = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : Tuple = model(**_SCREAMING_SNAKE_CASE )
86
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class UnCLIPSchedulerTest(SchedulerCommonTest):
    """Tests for ``UnCLIPScheduler``."""

    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Default scheduler config, overridable via keyword arguments."""
        config = {
            'num_train_timesteps': 1000,
            'variance_type': 'fixed_small_log',
            'clip_sample': True,
            'clip_sample_range': 1.0,
            'prediction_type': 'epsilon',
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ['fixed_small_log', 'learned_range']:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ['epsilon', 'sample']:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                # a "previous" timestep must actually precede the current one
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='fixed_small_log')
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='learned_range')
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
278
0
from typing import List

import jiwer
import jiwer.transforms as tr
from packaging import version

import datasets
from datasets.config import PY_VERSION


# importlib.metadata became stdlib in Python 3.8; use the backport before that.
if PY_VERSION < version.parse('''3.8'''):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


# Delimiter inserted between sentences before character-level scoring.
# NOTE(review): later code references `SENTENCE_DELIMITER`, but automated
# renaming collapsed this binding to `UpperCamelCase` — confirm the intended
# name before running this module.
UpperCamelCase = ''''''


if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):

    class snake_case_ ( tr.AbstractTransform ):
        # Back-fill for jiwer < 2.3.0, which lacks ReduceToListOfListOfChars:
        # splits each sentence into a flat list of characters, inserting the
        # configured delimiter between consecutive sentences.
        def __init__( self : int , lowercase_ : str = " " ) -> Optional[Any]:
            # NOTE(review): RHS references `sentence_delimiter`, but the
            # parameter is named `lowercase_`, and the LHS is a local rather
            # than `self.sentence_delimiter` — renaming artifact; the method
            # below reads `self.sentence_delimiter`. Confirm original bindings.
            lowercase__ : Optional[int] = sentence_delimiter

        def __UpperCamelCase ( self : Any , lowercase_ : str ) -> List[str]:
            # A single string becomes its list of characters.
            return list(lowercase_ )

        def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[str] ) -> Union[str, Any]:
            # NOTE(review): accumulator renamed — `chars` is referenced but the
            # assignment above binds `lowercase__`; confirm.
            lowercase__ : str = []
            for sent_idx, sentence in enumerate(lowercase_ ):
                chars.extend(self.process_string(lowercase_ ) )
                # Append the delimiter between sentences, but not after the last one.
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowercase_ ) - 1:
                    chars.append(self.sentence_delimiter )
            return chars

    UpperCamelCase = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] )
else:
    # jiwer >= 2.3.0 ships the character-level reduction natively.
    UpperCamelCase = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ] )

UpperCamelCase = '''\
@inproceedings{inproceedings,
  author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
  year = {2004},
  month = {01},
  pages = {},
  title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''

UpperCamelCase = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.

CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.

Character error rate can be computed as:

CER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).

CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''

UpperCamelCase = '''
Computes CER score of transcribed segments against references.
Args:
    references: list of references for each speech input.
    predictions: list of transcribtions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
    (float): the character error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> cer = datasets.load_metric("cer")
    >>> cer_score = cer.compute(predictions=predictions, references=references)
    >>> print(cer_score)
    0.34146341463414637
'''


# NOTE(review): the decorator references `_DESCRIPTION` / `_KWARGS_DESCRIPTION`
# while the module-level strings above were renamed to `UpperCamelCase` —
# renaming artifact; confirm the original constant names.
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
    # Character Error Rate metric: (S + D + I) / (S + D + C), computed by
    # running jiwer's WER machinery over character-level transforms.
    def __UpperCamelCase ( self : Tuple ) -> Any:
        # Declare inputs (pairs of strings) and point at jiwer as the codebase.
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Value("string" , id="sequence" ),
                } ) ,
            codebase_urls=["https://github.com/jitsi/jiwer/"] ,
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ] ,
        )

    def __UpperCamelCase ( self : List[str] , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Union[str, Any]=False ) -> Union[str, Any]:
        # When concatenate_texts is set, score all sentences as one sequence;
        # the "wer" key holds the error rate because the char-level transforms
        # make jiwer's word machinery operate on characters.
        if concatenate_texts:
            return jiwer.compute_measures(
                lowercase_ , lowercase_ , truth_transform=lowercase_ , hypothesis_transform=lowercase_ , )["wer"]

        # Otherwise accumulate edit counts pair by pair and divide once at the
        # end, which weights every character equally across examples.
        lowercase__ : List[str] = 0
        lowercase__ : Dict = 0
        for prediction, reference in zip(lowercase_ , lowercase_ ):
            lowercase__ : Union[str, Any] = jiwer.compute_measures(
                lowercase_ , lowercase_ , truth_transform=lowercase_ , hypothesis_transform=lowercase_ , )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
87
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
# NOTE(review): `_A` is bound to the logger and then immediately rebound to the
# CPU device — automated renaming collapsed two distinct module globals
# (logger / device); confirm original names before running.
_A = logging.get_logger(__name__)

_A = torch.device('''cpu''')


def __UpperCamelCase ( ):
    # Download the standard COCO validation image used by HF conversion scripts
    # for smoke-testing model outputs.
    # NOTE(review): locals collapsed to `lowerCAmelCase_` and `return im`
    # references a name never bound here — renaming artifact; confirm.
    lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw )
    return im


def __UpperCamelCase ( _A ):
    # Expected first-five logits for each released SwiftFormer variant, used to
    # verify the converted weights numerically.
    # NOTE(review): branches test `swiftformer_name`, but the parameter was
    # renamed to `_A`; also no else-branch, so an unknown name returns None.
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] )

    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] )

    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] )

    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] )


def __UpperCamelCase ( _A , _A , _A ):
    # Move one entry of a state dict to a new key (pop old, assign new).
    # NOTE(review): body references `dct`/`val` while the parameters were all
    # renamed to `_A` — renaming artifact; confirm original signature
    # (dct, old, new) against callers.
    lowerCAmelCase_ = dct.pop(_A )
    lowerCAmelCase_ = val


def __UpperCamelCase ( _A ):
    # Build (old_key, new_key) pairs translating the original SwiftFormer
    # checkpoint layout into the HF `SwiftFormerForImageClassification` layout.
    lowerCAmelCase_ = []
    for k in state_dict.keys():
        lowerCAmelCase_ = k
        # abbreviation expansions used by the original repo
        if ".pwconv" in k:
            lowerCAmelCase_ = k_new.replace('''.pwconv''' , '''.point_wise_conv''' )
        if ".dwconv" in k:
            lowerCAmelCase_ = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' )
        if ".Proj." in k:
            lowerCAmelCase_ = k_new.replace('''.Proj.''' , '''.proj.''' )
        if "patch_embed" in k_new:
            lowerCAmelCase_ = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' )
        if "network" in k_new:
            lowerCAmelCase_ = k_new.split('''.''' )
            # numeric third component means a block inside a stage; otherwise a
            # plain rename of the `network` prefix suffices
            if ls[2].isdigit():
                lowerCAmelCase_ = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] )
            else:
                lowerCAmelCase_ = k_new.replace('''network''' , '''swiftformer.encoder.network''' )
        rename_keys.append((k, k_new) )
    return rename_keys


@torch.no_grad()
def __UpperCamelCase ( _A , _A , _A ):
    # End-to-end conversion: build config, load + rename original weights,
    # verify logits against the hard-coded expectations, then save.
    lowerCAmelCase_ = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    lowerCAmelCase_ = 1000
    lowerCAmelCase_ = '''huggingface/label-files'''
    lowerCAmelCase_ = '''imagenet-1k-id2label.json'''
    lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) )
    lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()}
    lowerCAmelCase_ = idalabel
    lowerCAmelCase_ = {v: k for k, v in idalabel.items()}

    # size of the architecture: per-stage depths and embedding widths
    if swiftformer_name == "swiftformer_xs":
        lowerCAmelCase_ = [3, 3, 6, 4]
        lowerCAmelCase_ = [48, 56, 112, 220]

    elif swiftformer_name == "swiftformer_s":
        lowerCAmelCase_ = [3, 3, 9, 6]
        lowerCAmelCase_ = [48, 64, 168, 224]

    elif swiftformer_name == "swiftformer_l1":
        lowerCAmelCase_ = [4, 3, 10, 5]
        lowerCAmelCase_ = [48, 96, 192, 384]

    elif swiftformer_name == "swiftformer_l3":
        lowerCAmelCase_ = [4, 4, 12, 6]
        lowerCAmelCase_ = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('''https''' ):
            lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' , check_hash=_A )
        else:
            lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' )
    lowerCAmelCase_ = checkpoint

    lowerCAmelCase_ = create_rename_keys(_A )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(_A , _A , _A )

    # load HuggingFace model
    lowerCAmelCase_ = SwiftFormerForImageClassification(_A ).eval()
    hf_model.load_state_dict(_A )

    # prepare test inputs
    lowerCAmelCase_ = prepare_img()
    # NOTE(review): loading an image processor from the literal path
    # '''preprocessor_config''' assumes such a local config exists — confirm.
    lowerCAmelCase_ = ViTImageProcessor.from_pretrained('''preprocessor_config''' )
    lowerCAmelCase_ = processor(images=_A , return_tensors='''pt''' )

    # compare outputs from both models
    lowerCAmelCase_ = get_expected_output(_A )
    lowerCAmelCase_ = hf_model(inputs['''pixel_values'''] ).logits

    assert hf_logits.shape == torch.Size([1, 1000] )
    assert torch.allclose(hf_logits[0, 0:5] , _A , atol=1E-3 )

    Path(_A ).mkdir(exist_ok=_A )
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" )
    hf_model.save_pretrained(_A )


if __name__ == "__main__":
    _A = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--swiftformer_name''',
        default='''swiftformer_xs''',
        choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
        type=str,
        help='''Name of the SwiftFormer model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default='''./converted_outputs/''',
        type=str,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')

    _A = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
278
0
def a__ ( A_ ): '''simple docstring''' if a < 0: raise ValueError("""Input value must be a positive integer""" ) elif isinstance(A_, A_ ): raise TypeError("""Input value must be a 'int' type""" ) return bin(A_ ).count("""1""" ) if __name__ == "__main__": import doctest doctest.testmod()
88
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _A = logging.get_logger(__name__) _A = { '''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''', # See all ViT models at https://huggingface.co/models?filter=vit } class A ( __UpperCAmelCase ): __snake_case = 'vit' def __init__( self, UpperCamelCase__=768, UpperCamelCase__=12, UpperCamelCase__=12, UpperCamelCase__=3072, UpperCamelCase__="gelu", UpperCamelCase__=0.0, UpperCamelCase__=0.0, UpperCamelCase__=0.02, UpperCamelCase__=1E-12, UpperCamelCase__=224, UpperCamelCase__=16, UpperCamelCase__=3, UpperCamelCase__=True, UpperCamelCase__=16, **UpperCamelCase__, ): """simple docstring""" super().__init__(**UpperCamelCase__ ) lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_act lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = image_size lowerCAmelCase_ = patch_size lowerCAmelCase_ = num_channels lowerCAmelCase_ = qkv_bias lowerCAmelCase_ = encoder_stride class A ( __UpperCAmelCase ): __snake_case = version.parse('1.11' ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return 1E-4
278
0
'''simple docstring'''
# Utilities for quantizing models with bitsandbytes (8-bit / 4-bit linear
# layers), building device maps, and offloading quantized weights.
#
# NOTE(review): automated renaming collapsed many distinct identifiers in this
# file — the same availability helper is imported twice, several function
# signatures repeat the parameter name `lowerCAmelCase_` (a SyntaxError as
# written), and bodies reference names (`device_map`, `model`, `proceed`, ...)
# that are never bound under the collapsed names. Comments below describe the
# evident intent; confirm against the original accelerate source.
import logging
import os
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

from accelerate.utils.imports import (
    is_abit_bnb_available,
    is_abit_bnb_available,
    is_bnb_available,
)

from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)


if is_bnb_available():
    import bitsandbytes as bnb

from copy import deepcopy


__lowerCAmelCase = logging.getLogger(__name__)


def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , ) -> Dict:
    # Quantize `model` per `bnb_quantization_config`, either in-place for an
    # already-materialized model or by loading weights into a meta model and
    # dispatching across devices.
    _a : str = bnb_quantization_config.load_in_abit
    _a : Dict = bnb_quantization_config.load_in_abit
    # Fail early if the installed bitsandbytes cannot do the requested mode.
    if load_in_abit and not is_abit_bnb_available():
        raise ImportError(
            'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.'
        )
    if load_in_abit and not is_abit_bnb_available():
        raise ValueError(
            'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            'make sure you have the latest version of `bitsandbytes` installed.'
        )

    _a : str = []
    # custom device map
    if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(device_map.keys() ) > 1:
        _a : List[Any] = [key for key, value in device_map.items() if value in ['disk', 'cpu']]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        _a : Optional[int] = get_keys_to_not_convert(lowerCAmelCase_ )

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_abit:
        bnb_quantization_config.skip_modules.extend(lowerCAmelCase_ )
    _a : Any = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fpaa_modules is None:
        _a : List[str] = []
    _a : Optional[Any] = bnb_quantization_config.keep_in_fpaa_modules
    modules_to_not_convert.extend(lowerCAmelCase_ )

    # compatibility with peft
    _a : Optional[Any] = load_in_abit
    _a : Dict = load_in_abit

    _a : Tuple = get_parameter_device(lowerCAmelCase_ )
    if model_device.type != "meta":
        # quantization of an already loaded model: swap nn.Linear layers for
        # bnb layers, fix dtypes, then move the model to a CUDA device.
        logger.warning(
            'It is not recommended to quantize a loaded model. '
            'The model should be instantiated under the `init_empty_weights` context manager.'
        )
        _a : int = replace_with_bnb_layers(lowerCAmelCase_ , lowerCAmelCase_ , modules_to_not_convert=lowerCAmelCase_ )
        # convert param to the right dtype
        _a : Dict = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
                param.to(torch.floataa )
                if param.dtype != torch.floataa:
                    _a : Any = name.replace('.weight' , '' ).replace('.bias' , '' )
                    _a : str = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
                    if param is not None:
                        param.to(torch.floataa )
            elif torch.is_floating_point(lowerCAmelCase_ ):
                param.to(lowerCAmelCase_ )
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device() )
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device() )
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info(
            f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
            'We move the model to cuda.'
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )

    else:
        # meta-device path: build the quantized skeleton, infer a device map,
        # stream the checkpoint in, then dispatch across devices.
        with init_empty_weights():
            _a : str = replace_with_bnb_layers(
                lowerCAmelCase_ , lowerCAmelCase_ , modules_to_not_convert=lowerCAmelCase_ )
        _a : List[Any] = get_quantized_model_device_map(
            lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , max_memory=lowerCAmelCase_ , no_split_module_classes=lowerCAmelCase_ , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            _a : Any = True
        _a : Union[str, Any] = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
        load_checkpoint_in_model(
            lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , dtype=bnb_quantization_config.torch_dtype , offload_folder=lowerCAmelCase_ , offload_state_dict=lowerCAmelCase_ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
        return dispatch_model(lowerCAmelCase_ , device_map=lowerCAmelCase_ , offload_dir=lowerCAmelCase_ )


def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> Optional[int]:
    # Resolve a device map for the quantized model, either validating a
    # user-supplied map or inferring one with the bnb-specific dtypes.
    if device_map is None:
        if torch.cuda.is_available():
            _a : List[Any] = {'': torch.cuda.current_device()}
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )

    if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
                '\'sequential\'.'
            )

        # Skip-modules keep the configured torch_dtype; keep-in-fp32 modules
        # are forced to float32 when sizing the map.
        _a : Union[str, Any] = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.floataa
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
            } )

        _a : int = {}
        _a : str = special_dtypes
        _a : Tuple = no_split_module_classes
        _a : Optional[Any] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            _a : Optional[Any] = get_balanced_memory(
                lowerCAmelCase_ , low_zero=(device_map == 'balanced_low_0') , max_memory=lowerCAmelCase_ , **lowerCAmelCase_ , )

        _a : Optional[Any] = max_memory
        _a : Any = infer_auto_device_map(lowerCAmelCase_ , **lowerCAmelCase_ )

    if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
        # check if don't have any quantized module on the cpu
        _a : Tuple = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules

        _a : Tuple = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_abit:
                    # 4-bit modules cannot be offloaded to cpu/disk at all.
                    raise ValueError(
                        '\n                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n                        these modules in `torch_dtype`, you need to pass a custom `device_map` to\n                        `load_and_quantize_model`. Check\n                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n                        for more details.\n                        ' )
                else:
                    logger.info(
                        'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
        del device_map_without_some_modules
    return device_map


def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> Tuple:
    # Public wrapper around the recursive replacement: swaps every eligible
    # nn.Linear for a bitsandbytes layer and warns when nothing was replaced.
    if modules_to_not_convert is None:
        _a : Optional[int] = []
    _a , _a : Tuple = _replace_with_bnb_layers(
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    if not has_been_replaced:
        logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.' )
    return model


def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> List[Any]:
    # Recursive worker: walks named_children, replacing nn.Linear modules that
    # are not excluded; returns (model, has_been_replaced).
    _a : Any = False
    for name, module in model.named_children():
        if current_key_name is None:
            _a : List[Any] = []
        current_key_name.append(lowerCAmelCase_ )
        if isinstance(lowerCAmelCase_ , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            _a : str = '.'.join(lowerCAmelCase_ )
            _a : Any = True
            for key in modules_to_not_convert:
                # exclusion matches either the exact dotted path or a prefix of it
                if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str:
                    _a : Any = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_abit:
                    _a : Tuple = bnb.nn.LinearabitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=lowerCAmelCase_ , threshold=bnb_quantization_config.llm_inta_threshold , )
                elif bnb_quantization_config.load_in_abit:
                    # NOTE(review): the two quantization branches test the same
                    # collapsed attribute name (`load_in_abit`) — originally
                    # load_in_8bit vs load_in_4bit; as written the elif is
                    # unreachable. Confirm.
                    _a : Optional[Any] = bnb.nn.Linearabit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
                else:
                    raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
                _a : Optional[Any] = module.weight.data
                if module.bias is not None:
                    _a : Union[str, Any] = module.bias.data
                bnb_module.requires_grad_(lowerCAmelCase_ )
                setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
                _a : Optional[int] = True
        if len(list(module.children() ) ) > 0:
            _a , _a : Optional[int] = _replace_with_bnb_layers(
                lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
            _a : Any = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced


def __lowerCamelCase ( lowerCAmelCase_ ) -> int:
    # Determine which module names should NOT be quantized: output heads and
    # any modules tied to other weights.
    # Create a copy of the model
    with init_empty_weights():
        _a : Optional[int] = deepcopy(lowerCAmelCase_ )  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    _a : int = find_tied_parameters(lowerCAmelCase_ )
    # For compatibility with Accelerate < 0.18
    if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
        _a : str = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        _a : List[str] = sum(lowerCAmelCase_ , [] )
    _a : Union[str, Any] = len(lowerCAmelCase_ ) > 0

    # Check if it is a base model
    _a : str = False
    if hasattr(lowerCAmelCase_ , 'base_model_prefix' ):
        _a : str = not hasattr(lowerCAmelCase_ , model.base_model_prefix )

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    _a : Optional[int] = list(model.named_children() )
    _a : int = [list_modules[-1][0]]

    # add last module together with tied weights
    _a : str = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ )
    _a : Optional[Any] = list(set(lowerCAmelCase_ ) ) + list(lowerCAmelCase_ )

    # remove ".weight" from the keys
    _a : List[str] = ['.weight', '.bias']
    _a : Any = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                _a : int = name.replace(lowerCAmelCase_ , '' )
        filtered_module_names.append(lowerCAmelCase_ )

    return filtered_module_names


def __lowerCamelCase ( lowerCAmelCase_ ) -> Optional[Any]:
    # True when the model already contains a bitsandbytes quantized linear.
    for m in model.modules():
        if isinstance(lowerCAmelCase_ , bnb.nn.Linearabit ):
            return True
    return False


def __lowerCamelCase ( lowerCAmelCase_ ) -> Optional[Any]:
    # Device of the model's first parameter, used as a proxy for the model's
    # overall placement.
    return next(parameter.parameters() ).device


def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
    # Quantize one parameter (if needed) on device 0, offload its weight (and
    # 8-bit SCB statistics when present) to disk, then park it on 'meta'.
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fpaa_statistics is None:
        set_module_tensor_to_device(lowerCAmelCase_ , lowerCAmelCase_ , 0 , dtype=lowerCAmelCase_ , value=lowerCAmelCase_ )
        _a : List[Any] = param_name
        _a : List[Any] = model
        if "." in tensor_name:
            _a : Tuple = tensor_name.split('.' )
            for split in splits[:-1]:
                _a : Union[str, Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
                if new_module is None:
                    raise ValueError(f"""{module} has no attribute {split}.""" )
                _a : Dict = new_module
            _a : List[Any] = splits[-1]
        # offload weights
        _a : List[str] = False
        offload_weight(module._parameters[tensor_name] , lowerCAmelCase_ , lowerCAmelCase_ , index=lowerCAmelCase_ )
        if hasattr(module._parameters[tensor_name] , 'SCB' ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , lowerCAmelCase_ , index=lowerCAmelCase_ , )
    else:
        offload_weight(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , index=lowerCAmelCase_ )
        offload_weight(lowerCAmelCase_ , param_name.replace('weight' , 'SCB' ) , lowerCAmelCase_ , index=lowerCAmelCase_ )

    set_module_tensor_to_device(lowerCAmelCase_ , lowerCAmelCase_ , 'meta' , dtype=lowerCAmelCase_ , value=torch.empty(*param.size() ) )
89
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_json_dataset(dataset, expected_features):
    """Shared assertions for a Dataset read from the 4-row JSON fixture."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    # Column order must follow the file, not a sorted order.
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = Features({feature: Value(dtype) for feature, dtype in features.items()})
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    # Column order follows the requested features, not the file.
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    # Parenthesized on purpose: compare against the effective default split
    # (the unparenthesized original asserted `(dataset.split == split) if split else "train"`).
    assert dataset.split == (split if split else "train")


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict read from the 4-row JSON fixture."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    """Parse a whole (non-lines) JSON document from a binary buffer."""
    return json.load(buffer)


def load_json_lines(buffer):
    """Parse a JSON-lines buffer; one JSON object per line (fixed: was parsing
    the whole buffer once per line via an undefined name)."""
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    # Renamed from `A` so pytest actually collects it; methods renamed so they
    # are distinct and match pytest's `test_*` convention.

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        # num_proc must be >= 1; the constructor itself should raise.
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_json_file = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_json_file, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content


# Backward-compat alias for the old (obfuscated) class name.
A = TestJsonDatasetWriter
278
0
def lowerCamelCase_ ( UpperCamelCase__ : int ) -> list:
    """Return the first n Hamming numbers (numbers of the form 2^i * 3^j * 5^k),
    in ascending order, starting from 1.

    Raises:
        ValueError: if the requested count is < 1.
    """
    n_element = int(UpperCamelCase__)
    if n_element < 1:
        # Raise directly instead of building the exception in a temporary first.
        raise ValueError('a should be a positive number')
    hamming_list = [1]
    # i, j, k index the smallest element whose 2x / 3x / 5x multiple
    # has not yet been emitted.
    i, j, k = 0, 0, 0
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    # Fixed: the original referenced undefined names (`n`, `hamming`,
    # `hamming_numbers`) here.
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = lowerCamelCase_(int(n))
    print("-----------------------------------------------------")
    print(f'''The list with nth numbers is: {hamming_numbers}''')
    print("-----------------------------------------------------")
90
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union

import torch

from ..utils import BaseOutput


# Default filename under which a scheduler's configuration is saved.
# NOTE(review): the mixin below reads `SCHEDULER_CONFIG_NAME`, which is not
# defined anywhere in this chunk -- this constant was presumably meant to
# carry that name; confirm against the original module.
_A = '''scheduler_config.json'''


class A ( __UpperCAmelCase ):
    # NOTE(review): base class `__UpperCAmelCase` is not defined in this chunk;
    # the sequential 1..14 integer members suggest an Enum of scheduler types.
    # NOTE(review): all members share the name `__snake_case`, so only the last
    # assignment would survive in a plain class (and an Enum would reject the
    # duplicates) -- the member names were lost in obfuscation.
    __snake_case = 1
    __snake_case = 2
    __snake_case = 3
    __snake_case = 4
    __snake_case = 5
    __snake_case = 6
    __snake_case = 7
    __snake_case = 8
    __snake_case = 9
    __snake_case = 10
    __snake_case = 11
    __snake_case = 12
    __snake_case = 13
    __snake_case = 14


@dataclass
class A ( __UpperCAmelCase ):
    # Output container for a scheduler step (BaseOutput is imported above and is
    # the likely intended base).
    # NOTE(review): shadows the class of the same name above; the bare `42`
    # looks like an obfuscated annotated field (e.g. `prev_sample: torch.FloatTensor`).
    __snake_case = 42


class A :
    # Mixin providing save/load of scheduler configurations.
    # NOTE(review): shadows both classes named `A` above -- only this class is
    # reachable under the name `A` at module level.
    __snake_case = SCHEDULER_CONFIG_NAME  # NOTE(review): undefined here; see `_A` above.
    __snake_case = []
    __snake_case = True

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__=False, **UpperCamelCase__, ):
        """Instantiate a scheduler from a saved configuration via `cls.load_config`
        + `cls.from_config`.

        NOTE(review): the repeated parameter name `UpperCamelCase__` is a
        SyntaxError as written; the keyword names forwarded to `cls.load_config`
        below (pretrained_model_name_or_path, subfolder, return_unused_kwargs)
        indicate the intended distinct parameters.
        """
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = cls.load_config(
            pretrained_model_name_or_path=UpperCamelCase__,
            subfolder=UpperCamelCase__,
            return_unused_kwargs=UpperCamelCase__,
            return_commit_hash=UpperCamelCase__,
            **UpperCamelCase__,
        )
        return cls.from_config(UpperCamelCase__, return_unused_kwargs=UpperCamelCase__, **UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = False, **UpperCamelCase__ ):
        """Save the scheduler configuration; delegates to `self.save_config`."""
        self.save_config(save_directory=UpperCamelCase__, push_to_hub=UpperCamelCase__, **UpperCamelCase__ )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Return the scheduler classes compatible with this one."""
        return self._get_compatibles()

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls ):
        """Resolve compatible class names (this class + `cls._compatibles`) to
        classes looked up on the package's root module.

        NOTE(review): `_compatibles`, `compatible_classes_str` and
        `compatible_classes` are referenced but never bound under those names in
        this chunk -- the `lowerCAmelCase_` assignments above were presumably
        those variables before obfuscation.
        """
        lowerCAmelCase_ = list(set([cls.__name__] + cls._compatibles ) )
        lowerCAmelCase_ = importlib.import_module(__name__.split('''.''' )[0] )
        lowerCAmelCase_ = [
            getattr(UpperCamelCase__, UpperCamelCase__ ) for c in compatible_classes_str if hasattr(UpperCamelCase__, UpperCamelCase__ )
        ]
        return compatible_classes
278
0
"""simple docstring""" UpperCAmelCase_ : Any = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ UpperCAmelCase_ : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}] UpperCAmelCase_ : Tuple = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
91
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    # Renamed from `A`: the original file defined two classes named `A`, so the
    # first (fast) test class was shadowed and never ran.

    @property
    def dummy_uncond_unet(self):
        """Tiny seeded UNet so the fast test is deterministic and CPU-cheap.

        (Named to match the `self.dummy_uncond_unet` call site below; the
        original property carried an obfuscated name, raising AttributeError.)
        """
        torch.manual_seed(0)
        # NOTE(review): `UNetaDModel` matches the import above; presumably
        # UNet2DModel upstream -- confirm.
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''),
            up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''),
        )
        return model

    def test_inference(self):
        """Tuple and dict pipeline outputs must match a known slice."""
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type='''numpy''').images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(
            num_inference_steps=2, generator=generator, output_type='''numpy''', return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        """Full-size pretrained model run; slow, gated behind @slow/@require_torch."""
        model_id = '''google/ncsnpp-celebahq-256'''
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type='''numpy''').images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
278
0
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


# Module names restored to match their call sites: the original bound all four
# constants to the single name `UpperCamelCase__`, leaving `logger`,
# `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` (referenced below) undefined.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
        """allenai/longformer-large-4096""": (
            """https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
        ),
        """allenai/longformer-large-4096-finetuned-triviaqa""": (
            """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
        ),
        """allenai/longformer-base-4096-extra.pos.embd.only""": (
            """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
        ),
        """allenai/longformer-large-4096-extra.pos.embd.only""": (
            """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
        ),
    },
    """merges_file""": {
        """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
        """allenai/longformer-large-4096""": (
            """https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
        ),
        """allenai/longformer-large-4096-finetuned-triviaqa""": (
            """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
        ),
        """allenai/longformer-base-4096-extra.pos.embd.only""": (
            """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
        ),
        """allenai/longformer-large-4096-extra.pos.embd.only""": (
            """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """allenai/longformer-base-4096""": 4096,
    """allenai/longformer-large-4096""": 4096,
    """allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
    """allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
    """allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Map every byte 0..255 to a printable unicode character so byte-level BPE
    vocabularies avoid whitespace/control characters. Returns byte -> str."""
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LongformerTokenizer(PreTrainedTokenizer):
    """Longformer tokenizer: byte-level BPE, same algorithm as RoBERTa/GPT-2.

    The base class was an undefined obfuscated name; `PreTrainedTokenizer`
    (imported above) is the contract the overridden methods below implement.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full vocabulary (base + added tokens) as token -> id."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to `token` and return the space-joined result."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (most frequent) known pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split `text` with the GPT-2 regex, byte-encode each piece, then BPE."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to its id, falling back to the unk token id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id (int) back to its token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Reverse the byte-encoding to recover the original text."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json and merges.txt into `save_directory`; returns paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # Fixed: the sort key lambda referenced an undefined name (`kv`).
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """<s> A </s> for a single sequence, <s> A </s></s> B </s> for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """1 for special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Longformer does not use token type ids: always all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so leading words get the ' word' BPE form."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)


# Backward-compat alias for the old (obfuscated) class name.
a__ = LongformerTokenizer
92
# Guarded exports for the UnCLIP pipeline package: if torch or a new-enough
# transformers is missing, fall back to dummy placeholder objects so that
# importing the parent package still succeeds.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    # UnCLIP requires both torch and transformers >= 4.25.0.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dummy stand-ins raise an informative error only when actually used.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
278
0
'''Greedy knapsack demo: menu items with value/weight, selected greedily by a
caller-provided key function.'''


class lowerCAmelCase__:
    """An item with a name, a value (profit) and a weight (cost)."""

    def __init__(self, name, value, weight):
        # Parameter names restored: the original declared the same obfuscated
        # name three times, which is a SyntaxError.
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''

    def get_value(self):
        # Named to match the `get_value()` call site in the greedy selector;
        # the original gave all five methods one shared name, so only the last
        # definition survived.
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def _snake_case(self):
        """Value density (value per unit weight); kept under its original name."""
        return self.value / self.weight


def build_menu(name, value, weight):
    """Build a list of items from parallel name/value/weight lists.

    (Renamed from one of three colliding `snake_case_` definitions.)
    """
    menu = []
    for i in range(len(value)):
        menu.append(lowerCAmelCase__(name[i], value[i], weight[i]))
    return menu


def greedy(items, max_cost, key_func):
    """Greedily take items in descending `key_func` order while they fit.

    Returns (chosen_items, total_value).
    (Renamed from one of three colliding `snake_case_` definitions.)
    """
    # reverse=True: consider the highest-keyed items first.
    items_copy = sorted(items, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def snake_case_():
    """Placeholder entry point retained from the original module (no-op)."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
93
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def __UpperCamelCase(_A=3):
    """Build an n-qubit quantum Fourier transform circuit, simulate it with
    10000 shots on the qasm simulator, and return the measurement counts.

    Raises:
        TypeError: if the qubit count is a string.
        ValueError: if it is <= 0, not an exact integer, or > 10.
    """
    number_of_qubits = _A
    # Fixed: the original checked `isinstance(_A, _A)`, which is never a valid
    # type test; reject non-numeric input before the numeric range checks.
    if isinstance(number_of_qubits, str):
        raise TypeError('''number of qubits must be a integer.''')
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''')
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('''number of qubits must be exact integer.''')
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate(>10).''')

    qr = QuantumRegister(number_of_qubits, '''qr''')
    cr = ClassicalRegister(number_of_qubits, '''cr''')
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits

    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            # Controlled phase rotation between qubit j and the current target.
            # NOTE(review): control/target args were obfuscation-collapsed;
            # restored per the standard QFT construction -- confirm.
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # Reverse qubit order at the end of the QFT.
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('''qasm_simulator''')
    job = execute(quantum_circuit, backend, shots=10000)
    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    # Fixed: the original called an undefined name `quantum_fourier_transform`.
    print(
        f"Total count for quantum fourier transform state is: \
{__UpperCamelCase(3)}"
    )
278
0
import warnings
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class _snake_case(SequenceFeatureExtractor):
    """SpeechT5 feature extractor.

    Pre-processes raw speech into padded float arrays (`audio`) and/or
    log-mel spectrogram targets (`audio_target`).

    NOTE(review): the original text inherited the class from its own
    undefined name, gave every parameter one placeholder name (a
    SyntaxError), gave every method one shared name while calling
    `self._process_audio` / `self._extract_mel_features` /
    `self.zero_mean_unit_var_norm`, and used the undefined dtypes
    `np.intaa` / `np.floataa`.  The names restored here are the ones the
    bodies and call sites themselves reference.
    """

    # Names of the model inputs this extractor produces.
    SCREAMING_SNAKE_CASE__ = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size=1,
        sampling_rate=16000,
        padding_value=0.0,
        do_normalize=False,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        frame_signal_scale=1.0,
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        reduction_factor=2,
        return_attention_mask=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        # Window/FFT geometry: milliseconds -> samples at the configured rate.
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        """Normalize each vector to zero mean / unit variance over its valid length."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def _extract_mel_features(self, one_waveform):
        """Compute a (frames, num_mel_bins) log10 mel spectrogram for one waveform."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T

    def __call__(
        self,
        audio=None,
        audio_target=None,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ):
        """Featurize `audio` inputs and/or `audio_target` spectrogram labels.

        When both are given, the target features are attached to the audio
        inputs as `labels` / `decoder_attention_mask`.
        """
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def _process_audio(
        self,
        speech,
        is_target=False,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        **kwargs,
    ):
        """Shared featurize+pad path for both audio inputs and spectrogram targets."""
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            speech = [np.asarray(one_speech, dtype=np.float32) for one_speech in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self):
        """Serialize, dropping attributes derived from the other properties."""
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
94
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """Return num! (factorial of a non-negative integer).

    Raises:
        ValueError: if `num` is negative.

    >>> factorial(5)
    120
    >>> factorial(0)
    1
    """
    if num < 0:
        raise ValueError("Number should not be negative.")
    # The original function had a scrambled name while recursing via
    # `factorial(...)`, which raised NameError; naming it `factorial`
    # makes the recursion (and the lru_cache memoization) work.
    return 1 if num in (0, 1) else num * factorial(num - 1)


# Backward-compatible alias for the previous public name.
__UpperCamelCase = factorial


if __name__ == "__main__":
    import doctest

    doctest.testmod()
278
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCAmelCase : str = logging.get_logger(__name__)

# NOTE(review): this rebinds the same module name as the logger above, as in
# the original text; kept for compatibility.
UpperCAmelCase : str = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class __lowerCAmelCase(PretrainedConfig):
    """Configuration for Megatron-BERT models.

    NOTE(review): the original text gave every __init__ parameter one
    placeholder name (a SyntaxError), inherited from an undefined name, and
    used an unimported `List[Any]` annotation at class level (NameError at
    class creation).  Parameter names are restored from the attribute
    assignments in the body; the base class is the imported
    `PretrainedConfig`, whose `super().__init__(pad_token_id=...)` contract
    the body already follows.
    """

    _lowercase = "megatron-bert"
    # Conventional PretrainedConfig identifier, added alongside the
    # scrambled attribute above for backward compatibility.
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
95
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation


def get_upernet_config(model_name):
    """Build a Swin+UperNet config for the given checkpoint name (ADE20k labels)."""
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=idalabel,
        label2id=labelaid,
    )
    return config


def create_rename_keys(config):
    """Return (old_name, new_name) pairs mapping mmseg keys to HF UperNet keys."""
    rename_keys = []
    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"))
        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))
    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    """Move the value under key `old` to key `new` in-place."""
    val = dct.pop(old)
    dct[new] = val


def read_in_q_k_v(state_dict, backbone_config):
    """Split each stage's fused qkv projection into separate q/k/v tensors.

    NOTE(review): the original text dropped the left-hand state_dict keys for
    the q/k/v slices; they are restored to the standard HF Swin layout.
    """
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on


def reverse_correct_unfold_reduction_order(x):
    """Undo the channel reordering of a downsample `reduction` weight (2-D)."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_reduction_order(x):
    """Apply the channel reordering of a downsample `reduction` weight (2-D)."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    """Undo the channel reordering of a downsample `norm` parameter (1-D)."""
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def correct_unfold_norm_order(x):
    """Apply the channel reordering of a downsample `norm` parameter (1-D)."""
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Download an mmseg Swin+UperNet checkpoint, convert it, verify and save it."""
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
278
0
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf lowercase__ = logging.get_logger(__name__) @dataclass class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = [ """no_inference""", """no_cuda""", """no_tpu""", """no_speed""", """no_memory""", """no_env_print""", """no_multi_process""", ] def __init__( self , **lowercase ): for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: _lowerCamelCase : Any = deprecated_arg[3:] _lowerCamelCase : int = not kwargs.pop(lowercase ) logger.warning( F'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or''' F''' {positive_arg}={kwargs[positive_arg]}''' ) _lowerCamelCase : Any = kwargs.pop('tpu_name' , self.tpu_name ) _lowerCamelCase : Dict = kwargs.pop('device_idx' , self.device_idx ) _lowerCamelCase : int = kwargs.pop('eager_mode' , self.eager_mode ) _lowerCamelCase : List[str] = kwargs.pop('use_xla' , self.use_xla ) super().__init__(**lowercase ) lowerCamelCase__ = field( default=lowercase, metadata={"""help""": """Name of TPU"""}, ) lowerCamelCase__ = field( default=0, metadata={"""help""": """CPU / GPU device index. Defaults to 0."""}, ) lowerCamelCase__ = field(default=lowercase, metadata={"""help""": """Benchmark models in eager model."""} ) lowerCamelCase__ = field( default=lowercase, metadata={ """help""": """Benchmark models using XLA JIT compilation. 
Note that `eager_model` has to be set to `False`.""" }, ) @cached_property def A_ ( self ): requires_backends(self , ['tf'] ) _lowerCamelCase : int = None if self.tpu: try: if self.tpu_name: _lowerCamelCase : List[Any] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: _lowerCamelCase : str = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: _lowerCamelCase : List[str] = None return tpu @cached_property def A_ ( self ): requires_backends(self , ['tf'] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) _lowerCamelCase : Any = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' ) _lowerCamelCase : Dict = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' ) else: tf.config.set_visible_devices([] , 'GPU' ) # disable GPU _lowerCamelCase : int = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' ) return strategy @property def A_ ( self ): requires_backends(self , ['tf'] ) return self._setup_tpu is not None @property def A_ ( self ): requires_backends(self , ['tf'] ) return self._setup_strategy @property def A_ ( self ): requires_backends(self , ['tf'] ) return tf.config.list_physical_devices('GPU' ) @property def A_ ( self ): requires_backends(self , ['tf'] ) if self.cuda: return len(self.gpu_list ) return 0 @property def A_ ( self ): return self.n_gpu > 0
96
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def log_results(result, args):
    """Compute WER/CER for `result` and write metrics (and optional logs) to disk.

    NOTE(review): the original text named all three functions identically with
    duplicate placeholder parameters (a SyntaxError); names are restored from
    the call sites (`log_results`, `normalize_text`, `main`).
    """
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text):
    """Lowercase, strip ignored punctuation and collapse whitespace runs."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    """Run the ASR pipeline over the requested dataset split and log WER/CER."""
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
278
0
"""Download a translation dataset with 🤗 Datasets and dump it as
`{split}.source` / `{split}.target` text files.

NOTE(review): the original text gave all four parameters one placeholder
name (a SyntaxError) and registered an undefined `download_wmt_dataset`
with fire; the parameter names are restored from the body and the function
is named to match the fire call site.
"""
from pathlib import Path

import fire
from tqdm import tqdm


def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download `dataset` for the `src_lang`-`tgt_lang` pair into `save_dir`.

    When `save_dir` is None it defaults to `{dataset}-{src_lang}-{tgt_lang}`.
    """
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
97
from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

# Known checkpoint -> maximum model input length.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints".
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class A(PreTrainedTokenizer):
    """
    CANINE-style character tokenizer: every Unicode character is its own token
    and a token's id is its codepoint. Special tokens live in the Private Use
    Area (see the codepoint constants above), so no vocabulary file exists.
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        """Build the tokenizer; plain-string special tokens are wrapped in `AddedToken`."""
        # NOTE(review): lstrip/rstrip defaults restored from the upstream CANINE
        # tokenizer — the original values were lost in identifier mangling.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        # The vocabulary is the entire Unicode codepoint space.
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize by splitting the text into individual characters."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """A token's id is simply its Unicode codepoint."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """Special codepoints map to their names; all others map to `chr(index)`."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. its specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # A character-level tokenizer has no vocabulary file to write.
        return ()
278
0
"""simple docstring""" import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__) def a_ ( lowerCamelCase ): UpperCAmelCase__ = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError('Quantized models are not supported.' ) UpperCAmelCase__ = re.match(r'^mobilenet_v1_([^_]*)_([^_]*)$' , lowerCamelCase ) if matches: UpperCAmelCase__ = float(matches[1] ) UpperCAmelCase__ = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". UpperCAmelCase__ = 1_0_0_1 UpperCAmelCase__ = 'imagenet-1k-id2label.json' UpperCAmelCase__ = 'huggingface/label-files' UpperCAmelCase__ = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type='dataset' ) , 'r' ) ) UpperCAmelCase__ = {int(lowerCamelCase ) + 1: v for k, v in idalabel.items()} UpperCAmelCase__ = 'background' UpperCAmelCase__ = idalabel UpperCAmelCase__ = {v: k for k, v in idalabel.items()} return config def a_ ( ): UpperCAmelCase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg' UpperCAmelCase__ = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ) return im @torch.no_grad() def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ): UpperCAmelCase__ = get_mobilenet_va_config(lowerCamelCase ) # Load 🤗 model UpperCAmelCase__ = MobileNetVaForImageClassification(lowerCamelCase ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(lowerCamelCase , lowerCamelCase , lowerCamelCase ) # Check outputs on an image, prepared by 
MobileNetV1ImageProcessor UpperCAmelCase__ = MobileNetVaImageProcessor( crop_size={'width': config.image_size, 'height': config.image_size} , size={'shortest_edge': config.image_size + 3_2} , ) UpperCAmelCase__ = image_processor(images=prepare_img() , return_tensors='pt' ) UpperCAmelCase__ = model(**lowerCamelCase ) UpperCAmelCase__ = outputs.logits assert logits.shape == (1, 1_0_0_1) if model_name == "mobilenet_v1_1.0_224": UpperCAmelCase__ = torch.tensor([-4.1739, -1.1233, 3.1205] ) elif model_name == "mobilenet_v1_0.75_192": UpperCAmelCase__ = torch.tensor([-3.9440, -2.3141, -0.3333] ) else: UpperCAmelCase__ = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1e-4 ) Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowerCamelCase ) if push_to_hub: print('Pushing to the hub...' ) UpperCAmelCase__ = 'google/' + model_name image_processor.push_to_hub(lowerCamelCase ) model.push_to_hub(lowerCamelCase ) if __name__ == "__main__": lowerCAmelCase__ : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='mobilenet_v1_1.0_224', type=str, help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.', ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' 
) lowerCAmelCase__ : Dict = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
98
def solution(limit: int = 1000000) -> int:
    """Return the starting number below `limit` that produces the longest
    Collatz sequence (Project Euler problem 14).

    Chain lengths are memoized in `counters` so each trajectory is walked
    only until it reaches a previously-solved number.

    Args:
        limit: exclusive upper bound for starting numbers (>= 2 to do any work).

    Returns:
        The start value in [1, limit) with the longest chain; 1 if limit <= 2.
    """
    largest_number = 1
    pre_counter = 1  # length of the longest chain found so far
    counters = {1: 1}  # start value -> chain length (memo)

    for start in range(2, limit):
        counter = 0
        number = start
        while True:
            if number in counters:
                # Rest of this trajectory is already known.
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if start not in counters:
            counters[start] = counter
        if counter > pre_counter:
            largest_number = start
            pre_counter = counter

    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
278
0
# NOTE(review): Collapsed (one-segment-per-line) dump of a Nezha model test
# module (model tester + common ModelTesterMixin suite + slow integration
# tests). Identifiers were mechanically mangled: every parameter is named
# `lowercase`, every local is `a__`, every method is `__lowercase` — so
# several `def` signatures contain DUPLICATE parameter names, which is a
# SyntaxError in Python, and same-named methods shadow one another. Bodies
# reference the real names (`parent`, `batch_size`, `model`, `result`,
# `config_and_inputs`, ...), so a faithful restore needs the upstream file.
# Code is intentionally left byte-identical pending that restore — do not
# assume this block is importable or runnable as-is.
import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class A__ : """simple docstring""" def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=128 , lowercase=32 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ) -> Dict: '''simple docstring''' a__ : Optional[Any] = parent a__ : str = batch_size a__ : Optional[int] = seq_length a__ : List[str] = is_training a__ : Any = use_input_mask a__ : Any = use_token_type_ids a__ : Any = use_labels a__ : Any = vocab_size a__ : Optional[Any] = hidden_size a__ : Any = num_hidden_layers a__ : int = num_attention_heads a__ : Any = intermediate_size a__ : Any = hidden_act a__ : Dict = hidden_dropout_prob a__ : List[Any] = attention_probs_dropout_prob a__ : Union[str, Any] = max_position_embeddings a__ : Optional[Any] = type_vocab_size a__ : str = type_sequence_label_size a__ : int = initializer_range a__ : List[Any] = num_labels a__ : Union[str, Any] =
num_choices a__ : Tuple = scope def __lowercase ( self) -> Union[str, Any]: '''simple docstring''' a__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a__ : int = None if self.use_input_mask: a__ : int = random_attention_mask([self.batch_size, self.seq_length]) a__ : str = None if self.use_token_type_ids: a__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) a__ : List[str] = None a__ : List[str] = None a__ : str = None if self.use_labels: a__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size) a__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) a__ : Any = ids_tensor([self.batch_size] , self.num_choices) a__ : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowercase ( self) -> Tuple: '''simple docstring''' return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , ) def __lowercase ( self) -> int: '''simple docstring''' ( ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ) : List[Any] = self.prepare_config_and_inputs() a__ : Tuple = True a__ : Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) a__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowercase ( self , lowercase ,
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> str: '''simple docstring''' a__ : Dict = NezhaModel(config=lowercase) model.to(lowercase) model.eval() a__ : int = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase) a__ : int = model(lowercase , token_type_ids=lowercase) a__ : Optional[Any] = model(lowercase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Tuple: '''simple docstring''' a__ : List[str] = True a__ : str = NezhaModel(lowercase) model.to(lowercase) model.eval() a__ : int = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , encoder_hidden_states=lowercase , encoder_attention_mask=lowercase , ) a__ : Optional[int] = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , encoder_hidden_states=lowercase , ) a__ : List[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Union[str, Any]: '''simple docstring''' a__ : str = NezhaForMaskedLM(config=lowercase) model.to(lowercase) model.eval() a__ : List[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Tuple: '''simple docstring''' a__ : List[str] =
NezhaForNextSentencePrediction(config=lowercase) model.to(lowercase) model.eval() a__ : int = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Any: '''simple docstring''' a__ : str = NezhaForPreTraining(config=lowercase) model.to(lowercase) model.eval() a__ : Optional[Any] = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , next_sentence_label=lowercase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> List[str]: '''simple docstring''' a__ : str = NezhaForQuestionAnswering(config=lowercase) model.to(lowercase) model.eval() a__ : str = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> List[Any]: '''simple docstring''' a__ : List[str] = self.num_labels a__ : int = NezhaForSequenceClassification(lowercase) model.to(lowercase) model.eval() a__ : List[str] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> List[Any]: '''simple docstring''' a__ : List[Any] =
self.num_labels a__ : Optional[Any] = NezhaForTokenClassification(config=lowercase) model.to(lowercase) model.eval() a__ : Any = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> Optional[Any]: '''simple docstring''' a__ : Optional[Any] = self.num_choices a__ : Union[str, Any] = NezhaForMultipleChoice(config=lowercase) model.to(lowercase) model.eval() a__ : Tuple = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() a__ : str = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() a__ : Dict = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() a__ : List[Any] = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def __lowercase ( self) -> List[str]: '''simple docstring''' a__ : List[str] = self.prepare_config_and_inputs() ( ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ) : Optional[Any] = config_and_inputs a__ : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): """simple docstring""" __A : Tuple = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) __A : int = ( { '''feature-extraction''': NezhaModel, '''fill-mask''': NezhaForMaskedLM, '''question-answering''': NezhaForQuestionAnswering, '''text-classification''': NezhaForSequenceClassification,
'''token-classification''': NezhaForTokenClassification, '''zero-shot''': NezhaForSequenceClassification, } if is_torch_available() else {} ) __A : str = True def __lowercase ( self , lowercase , lowercase , lowercase=False) -> Dict: '''simple docstring''' a__ : Union[str, Any] = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase) if return_labels: if model_class in get_values(lowercase): a__ : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowercase) a__ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase) return inputs_dict def __lowercase ( self) -> Tuple: '''simple docstring''' a__ : Union[str, Any] = NezhaModelTester(self) a__ : Tuple = ConfigTester(self , config_class=lowercase , hidden_size=37) def __lowercase ( self) -> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() def __lowercase ( self) -> Any: '''simple docstring''' a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase) def __lowercase ( self) -> Union[str, Any]: '''simple docstring''' a__ : Any = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowercase) def __lowercase ( self) -> Any: '''simple docstring''' ( ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ( a__ ) , ) : str = self.model_tester.prepare_config_and_inputs_for_decoder() a__ : Optional[int] = None self.model_tester.create_and_check_model_as_decoder( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) def __lowercase ( self) -> List[str]: '''simple docstring''' a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowercase) def __lowercase ( self) -> Dict: '''simple docstring''' a__ : List[Any]
= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowercase) def __lowercase ( self) -> Any: '''simple docstring''' a__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*lowercase) def __lowercase ( self) -> List[Any]: '''simple docstring''' a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowercase) def __lowercase ( self) -> Tuple: '''simple docstring''' a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase) def __lowercase ( self) -> List[Any]: '''simple docstring''' a__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowercase) def __lowercase ( self) -> int: '''simple docstring''' a__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase) @slow def __lowercase ( self) -> Dict: '''simple docstring''' for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Any = NezhaModel.from_pretrained(lowercase) self.assertIsNotNone(lowercase) @slow @require_torch_gpu def __lowercase ( self) -> Optional[int]: '''simple docstring''' a__ , a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice: return a__ : int = True a__ : Optional[Any] = model_class(config=lowercase) a__ : Dict = self._prepare_for_class(lowercase , lowercase) a__ : Union[str, Any] = torch.jit.trace( lowercase , (inputs_dict['input_ids'].to('cpu'), inputs_dict['attention_mask'].to('cpu'))) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowercase , os.path.join(lowercase , 'bert.pt')) a__ : Tuple = torch.jit.load(os.path.join(lowercase , 'bert.pt') , map_location=lowercase) loaded(inputs_dict['input_ids'].to(lowercase) , inputs_dict['attention_mask'].to(lowercase)) @require_torch class A__ ( unittest.TestCase ): """simple docstring""" @slow def __lowercase ( self) -> Dict: '''simple docstring''' a__ : Optional[int] = NezhaModel.from_pretrained('sijunhe/nezha-cn-base') a__ : Optional[int] = torch.tensor([[0, 1, 2, 3, 4, 5]]) a__ : List[Any] = torch.tensor([[0, 1, 1, 1, 1, 1]]) with torch.no_grad(): a__ : List[Any] = model(lowercase , attention_mask=lowercase)[0] a__ : str = torch.Size((1, 6, 768)) self.assertEqual(output.shape , lowercase) a__ : List[Any] = torch.tensor([[[0.06_85, 0.24_41, 0.11_02], [0.06_00, 0.19_06, 0.13_49], [0.02_21, 0.08_19, 0.05_86]]]) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowercase , atol=1e-4)) @slow def __lowercase ( self) -> List[Any]: '''simple docstring''' a__ : int = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base') a__ : str = torch.tensor([[0, 1, 2, 3, 4, 5]]) a__ : Dict = torch.tensor([[1, 1, 1, 1, 1, 1]]) with torch.no_grad(): a__ : str = model(lowercase , attention_mask=lowercase)[0] a__ : str = torch.Size((1, 6, 2_1128)) self.assertEqual(output.shape , lowercase) a__ : Any = torch.tensor( [[-2.79_39, -1.79_02, -2.21_89], [-2.85_85, -1.89_08, -2.37_23], [-2.64_99, -1.77_50, -2.25_58]]) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowercase , atol=1e-4))
99
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class A(unittest.TestCase):
    @slow
    def test_image_classification(self):
        """End-to-end check of DiT fine-tuned on RVL-CDIP: logits shape + slice.

        NOTE: the method must be `test_`-prefixed for unittest discovery to
        run it at all.
        """
        image_processor = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
        model = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
        model.to(torch_device)

        from datasets import load_dataset  # optional dependency, only needed by this slow test

        dataset = load_dataset('nielsr/rvlcdip-demo')

        image = dataset['train'][0]['image'].convert('RGB')

        inputs = image_processor(image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # RVL-CDIP has 16 document classes.
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
278
0
"""simple docstring""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { "huggingface/time-series-transformer-tourism-monthly": ( "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json" ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" __lowercase : Tuple = '''time_series_transformer''' __lowercase : List[str] = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = "student_t" , lowerCAmelCase__ = "nll" , lowerCAmelCase__ = 1 , lowerCAmelCase__ = [1, 2, 3, 4, 5, 6, 7] , lowerCAmelCase__ = "mean" , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 3_2 , lowerCAmelCase__ = 3_2 , lowerCAmelCase__ = 2 , lowerCAmelCase__ = 2 , lowerCAmelCase__ = 2 , lowerCAmelCase__ = 2 , lowerCAmelCase__ = True , lowerCAmelCase__ = "gelu" , lowerCAmelCase__ = 6_4 , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 0.1 , lowerCAmelCase__ = 1_0_0 , lowerCAmelCase__ = 0.02 , lowerCAmelCase__=True , **lowerCAmelCase__ , ): # time series specific configuration __SCREAMING_SNAKE_CASE = prediction_length __SCREAMING_SNAKE_CASE = context_length or prediction_length __SCREAMING_SNAKE_CASE = distribution_output __SCREAMING_SNAKE_CASE = loss __SCREAMING_SNAKE_CASE = input_size __SCREAMING_SNAKE_CASE = num_time_features __SCREAMING_SNAKE_CASE = lags_sequence __SCREAMING_SNAKE_CASE = scaling __SCREAMING_SNAKE_CASE = num_dynamic_real_features 
__SCREAMING_SNAKE_CASE = num_static_real_features __SCREAMING_SNAKE_CASE = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(lowerCAmelCase__) != num_static_categorical_features: raise ValueError( """The cardinality should be a list of the same length as `num_static_categorical_features`""") __SCREAMING_SNAKE_CASE = cardinality else: __SCREAMING_SNAKE_CASE = [0] if embedding_dimension and num_static_categorical_features > 0: if len(lowerCAmelCase__) != num_static_categorical_features: raise ValueError( """The embedding dimension should be a list of the same length as `num_static_categorical_features`""") __SCREAMING_SNAKE_CASE = embedding_dimension else: __SCREAMING_SNAKE_CASE = [min(5_0 , (cat + 1) // 2) for cat in self.cardinality] __SCREAMING_SNAKE_CASE = num_parallel_samples # Transformer architecture configuration __SCREAMING_SNAKE_CASE = input_size * len(lowerCAmelCase__) + self._number_of_features __SCREAMING_SNAKE_CASE = d_model __SCREAMING_SNAKE_CASE = encoder_attention_heads __SCREAMING_SNAKE_CASE = decoder_attention_heads __SCREAMING_SNAKE_CASE = encoder_ffn_dim __SCREAMING_SNAKE_CASE = decoder_ffn_dim __SCREAMING_SNAKE_CASE = encoder_layers __SCREAMING_SNAKE_CASE = decoder_layers __SCREAMING_SNAKE_CASE = dropout __SCREAMING_SNAKE_CASE = attention_dropout __SCREAMING_SNAKE_CASE = activation_dropout __SCREAMING_SNAKE_CASE = encoder_layerdrop __SCREAMING_SNAKE_CASE = decoder_layerdrop __SCREAMING_SNAKE_CASE = activation_function __SCREAMING_SNAKE_CASE = init_std __SCREAMING_SNAKE_CASE = use_cache super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__) @property def snake_case_ ( self): return ( sum(self.embedding_dimension) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
100
import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def __UpperCamelCase ( _A ): lowerCAmelCase_ = checkpoints.load_tax_checkpoint(_A ) lowerCAmelCase_ = flatten_dict(_A ) return flax_params def __UpperCamelCase ( _A ): lowerCAmelCase_ = {} lowerCAmelCase_ = { '''token_embedder''': '''embeddings''', '''encoder_norm''': '''layernorm''', '''kernel''': '''weight''', '''.out''': '''.output''', '''scale''': '''weight''', '''embedders_0.pos_embedding''': '''row_embedder.weight''', '''embedders_1.pos_embedding''': '''column_embedder.weight''', } lowerCAmelCase_ = { '''query''': '''attention.query''', '''key''': '''attention.key''', '''value''': '''attention.value''', '''output.dense''': '''output''', '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''', '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''', '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''', '''mlp.''': '''mlp.DenseReluDense.''', '''pre_mlp_layer_norm''': '''mlp.layer_norm''', '''self_attention.o''': '''self_attention.attention.o''', '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''', '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''', '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''', } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key lowerCAmelCase_ = '''.'''.join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): lowerCAmelCase_ = new_key.replace(_A , _A ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): lowerCAmelCase_ 
= new_key.replace(_A , _A ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number lowerCAmelCase_ = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , _A ) lowerCAmelCase_ = new_key.replace('''encoder''' , '''encoder.encoder''' ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number lowerCAmelCase_ = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , _A ) lowerCAmelCase_ = flax_dict[key] lowerCAmelCase_ = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): lowerCAmelCase_ = torch.from_numpy(converted_dict[key].T ) else: lowerCAmelCase_ = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def __UpperCamelCase ( _A , _A , _A=False , _A=False ): lowerCAmelCase_ = get_flax_param(_A ) if not use_large: lowerCAmelCase_ = PixaStructVisionConfig() lowerCAmelCase_ = PixaStructTextConfig() else: lowerCAmelCase_ = PixaStructVisionConfig( hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 ) lowerCAmelCase_ = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 ) lowerCAmelCase_ = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=_A ) lowerCAmelCase_ = PixaStructForConditionalGeneration(_A ) lowerCAmelCase_ = rename_and_convert_flax_params(_A ) model.load_state_dict(_A ) lowerCAmelCase_ = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' ) lowerCAmelCase_ = PixaStructImageProcessor() lowerCAmelCase_ = PixaStructProcessor(image_processor=_A , tokenizer=_A ) if use_large: lowerCAmelCase_ = 4096 lowerCAmelCase_ = True # mkdir if needed os.makedirs(_A , exist_ok=_A ) model.save_pretrained(_A ) processor.save_pretrained(_A ) print('''Model saved in {}'''.format(_A ) ) if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument('''--t5x_checkpoint_path''', default=None, 
type=str, help='''Path to the original T5x checkpoint.''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''') parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''') _A = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
278
0
import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa lowercase__ :List[Any] = logging.getLogger(__name__) class lowercase ( SCREAMING_SNAKE_CASE__ ): lowercase_ : Any ='''summarization''' lowercase_ : Optional[Any] =['''loss'''] lowercase_ : Union[str, Any] =ROUGE_KEYS lowercase_ : Optional[int] ='''rouge2''' def __init__( self ,A__ ,**A__): if hparams.sortish_sampler and hparams.gpus > 1: lowercase = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''') if hparams.sortish_sampler: raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''') super().__init__(A__ ,num_labels=A__ ,mode=self.mode ,**A__) use_task_specific_params(self.model ,'''summarization''') save_git_info(self.hparams.output_dir) lowercase = Path(self.output_dir) / '''metrics.json''' lowercase = Path(self.output_dir) / '''hparams.pkl''' pickle_save(self.hparams ,self.hparams_save_path) lowercase = 0 lowercase = 
defaultdict(A__) lowercase = self.config.model_type lowercase = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size lowercase = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } lowercase = { '''train''': self.hparams.n_train, '''val''': self.hparams.n_val, '''test''': self.hparams.n_test, } lowercase = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} lowercase = { '''train''': self.hparams.max_target_length, '''val''': self.hparams.val_max_target_length, '''test''': self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], f'target_lens: {self.target_lens}' assert self.target_lens["train"] <= self.target_lens["test"], f'target_lens: {self.target_lens}' if self.hparams.freeze_embeds: freeze_embeds(self.model) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder()) assert_all_frozen(self.model.get_encoder()) lowercase = get_git_info()['''repo_sha'''] lowercase = hparams.num_workers lowercase = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer ,A__): lowercase = self.tokenizer.lang_code_to_id[hparams.tgt_lang] lowercase = self.decoder_start_token_id lowercase = ( SeqaSeqDataset if hasattr(self.tokenizer ,'''prepare_seq2seq_batch''') else LegacySeqaSeqDataset ) lowercase = False lowercase = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: lowercase = self.hparams.eval_max_gen_length else: lowercase = self.model.config.max_length lowercase = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def A__ ( self ,A__): lowercase = { k: self.tokenizer.batch_decode(v.tolist()) if '''mask''' not in k else v.shape for k, v in batch.items() } save_json(A__ ,Path(self.output_dir) / 
'''text_batch.json''') save_json({k: v.tolist() for k, v in batch.items()} ,Path(self.output_dir) / '''tok_batch.json''') lowercase = True return readable_batch def A__ ( self ,A__ ,**A__): return self.model(A__ ,**A__) def A__ ( self ,A__): lowercase = self.tokenizer.batch_decode( A__ ,skip_special_tokens=A__ ,clean_up_tokenization_spaces=A__) return lmap(str.strip ,A__) def A__ ( self ,A__): lowercase = self.tokenizer.pad_token_id lowercase , lowercase = batch['''input_ids'''], batch['''attention_mask'''] lowercase = batch['''labels'''] if isinstance(self.model ,A__): lowercase = self.model._shift_right(A__) else: lowercase = shift_tokens_right(A__ ,A__) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero lowercase = decoder_input_ids self.save_readable_batch(A__) lowercase = self(A__ ,attention_mask=A__ ,decoder_input_ids=A__ ,use_cache=A__) lowercase = outputs['''logits'''] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id lowercase = nn.CrossEntropyLoss(ignore_index=A__) assert lm_logits.shape[-1] == self.vocab_size lowercase = ce_loss_fct(lm_logits.view(-1 ,lm_logits.shape[-1]) ,tgt_ids.view(-1)) else: lowercase = nn.functional.log_softmax(A__ ,dim=-1) lowercase , lowercase = label_smoothed_nll_loss( A__ ,A__ ,self.hparams.label_smoothing ,ignore_index=A__) return (loss,) @property def A__ ( self): return self.tokenizer.pad_token_id def A__ ( self ,A__ ,A__): lowercase = self._step(A__) lowercase = dict(zip(self.loss_names ,A__)) # tokens per batch lowercase = batch['''input_ids'''].ne(self.pad).sum() + batch['''labels'''].ne(self.pad).sum() lowercase = batch['''input_ids'''].shape[0] lowercase = batch['''input_ids'''].eq(self.pad).sum() lowercase = batch['''input_ids'''].eq(self.pad).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def A__ ( self ,A__ ,A__): return self._generative_step(A__) def A__ 
( self ,A__ ,A__="val"): self.step_count += 1 lowercase = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names} lowercase = losses['''loss'''] lowercase = { k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ['''gen_time''', '''gen_len'''] } lowercase = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) lowercase = torch.tensor(A__).type_as(A__) generative_metrics.update({k: v.item() for k, v in losses.items()}) losses.update(A__) lowercase = {f'{prefix}_avg_{k}': x for k, x in losses.items()} lowercase = self.step_count self.metrics[prefix].append(A__) # callback writes this to self.metrics_save_path lowercase = flatten_list([x['''preds'''] for x in outputs]) return { "log": all_metrics, "preds": preds, f'{prefix}_loss': loss, f'{prefix}_{self.val_metric}': metric_tensor, } def A__ ( self ,A__ ,A__): return calculate_rouge(A__ ,A__) def A__ ( self ,A__): lowercase = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') lowercase = self.model.generate( batch['''input_ids'''] ,attention_mask=batch['''attention_mask'''] ,use_cache=A__ ,decoder_start_token_id=self.decoder_start_token_id ,num_beams=self.eval_beams ,max_length=self.eval_max_length ,) lowercase = (time.time() - ta) / batch['''input_ids'''].shape[0] lowercase = self.ids_to_clean_text(A__) lowercase = self.ids_to_clean_text(batch['''labels''']) lowercase = self._step(A__) lowercase = dict(zip(self.loss_names ,A__)) lowercase = self.calc_generative_metrics(A__ ,A__) lowercase = np.mean(lmap(A__ ,A__)) base_metrics.update(gen_time=A__ ,gen_len=A__ ,preds=A__ ,target=A__ ,**A__) return base_metrics def A__ ( self ,A__ ,A__): return self._generative_step(A__) def A__ ( self ,A__): return self.validation_epoch_end(A__ ,prefix='''test''') def A__ ( self ,A__): lowercase = self.n_obs[type_path] lowercase = self.target_lens[type_path] lowercase 
= self.dataset_class( self.tokenizer ,type_path=A__ ,n_obs=A__ ,max_target_length=A__ ,**self.dataset_kwargs ,) return dataset def A__ ( self ,A__ ,A__ ,A__ = False): lowercase = self.get_dataset(A__) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": lowercase = dataset.make_sortish_sampler(A__ ,distributed=self.hparams.gpus > 1) return DataLoader( A__ ,batch_size=A__ ,collate_fn=dataset.collate_fn ,shuffle=A__ ,num_workers=self.num_workers ,sampler=A__ ,) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": lowercase = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch ,distributed=self.hparams.gpus > 1) return DataLoader( A__ ,batch_sampler=A__ ,collate_fn=dataset.collate_fn ,num_workers=self.num_workers ,) else: return DataLoader( A__ ,batch_size=A__ ,collate_fn=dataset.collate_fn ,shuffle=A__ ,num_workers=self.num_workers ,sampler=A__ ,) def A__ ( self): lowercase = self.get_dataloader('''train''' ,batch_size=self.hparams.train_batch_size ,shuffle=A__) return dataloader def A__ ( self): return self.get_dataloader('''val''' ,batch_size=self.hparams.eval_batch_size) def A__ ( self): return self.get_dataloader('''test''' ,batch_size=self.hparams.eval_batch_size) @staticmethod def A__ ( A__ ,A__): BaseTransformer.add_model_specific_args(A__ ,A__) add_generic_args(A__ ,A__) parser.add_argument( '''--max_source_length''' ,default=1_0_2_4 ,type=A__ ,help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) ,) parser.add_argument( '''--max_target_length''' ,default=5_6 ,type=A__ ,help=( '''The maximum total input sequence length after tokenization. 
Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) ,) parser.add_argument( '''--val_max_target_length''' ,default=1_4_2 ,type=A__ ,help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) ,) parser.add_argument( '''--test_max_target_length''' ,default=1_4_2 ,type=A__ ,help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) ,) parser.add_argument('''--freeze_encoder''' ,action='''store_true''') parser.add_argument('''--freeze_embeds''' ,action='''store_true''') parser.add_argument('''--sortish_sampler''' ,action='''store_true''' ,default=A__) parser.add_argument('''--overwrite_output_dir''' ,action='''store_true''' ,default=A__) parser.add_argument('''--max_tokens_per_batch''' ,type=A__ ,default=A__) parser.add_argument('''--logger_name''' ,type=A__ ,choices=['''default''', '''wandb''', '''wandb_shared'''] ,default='''default''') parser.add_argument('''--n_train''' ,type=A__ ,default=-1 ,required=A__ ,help='''# examples. -1 means use all.''') parser.add_argument('''--n_val''' ,type=A__ ,default=5_0_0 ,required=A__ ,help='''# examples. -1 means use all.''') parser.add_argument('''--n_test''' ,type=A__ ,default=-1 ,required=A__ ,help='''# examples. -1 means use all.''') parser.add_argument( '''--task''' ,type=A__ ,default='''summarization''' ,required=A__ ,help='''# examples. 
-1 means use all.''') parser.add_argument('''--label_smoothing''' ,type=A__ ,default=0.0 ,required=A__) parser.add_argument('''--src_lang''' ,type=A__ ,default='''''' ,required=A__) parser.add_argument('''--tgt_lang''' ,type=A__ ,default='''''' ,required=A__) parser.add_argument('''--eval_beams''' ,type=A__ ,default=A__ ,required=A__) parser.add_argument( '''--val_metric''' ,type=A__ ,default=A__ ,required=A__ ,choices=['''bleu''', '''rouge2''', '''loss''', None]) parser.add_argument('''--eval_max_gen_length''' ,type=A__ ,default=A__ ,help='''never generate more than n tokens''') parser.add_argument('''--save_top_k''' ,type=A__ ,default=1 ,required=A__ ,help='''How many checkpoints to save''') parser.add_argument( '''--early_stopping_patience''' ,type=A__ ,default=-1 ,required=A__ ,help=( '''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So''' ''' val_check_interval will effect it.''' ) ,) return parser class lowercase ( SCREAMING_SNAKE_CASE__ ): lowercase_ : Optional[int] ='''translation''' lowercase_ : int =['''loss'''] lowercase_ : Optional[Any] =['''bleu'''] lowercase_ : Tuple ='''bleu''' def __init__( self ,A__ ,**A__): super().__init__(A__ ,**A__) lowercase = hparams.src_lang lowercase = hparams.tgt_lang def A__ ( self ,A__ ,A__): return calculate_bleu(A__ ,A__) def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=None ): '''simple docstring''' Path(args.output_dir ).mkdir(exist_ok=lowerCAmelCase__ ) check_output_dir(lowerCAmelCase__ , expected_items=3 ) if model is None: if "summarization" in args.task: lowercase = SummarizationModule(lowerCAmelCase__ ) else: lowercase = TranslationModule(lowerCAmelCase__ ) lowercase = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith('''/tmp''' ) or str(args.output_dir ).startswith('''/var''' ) ): lowercase = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from 
pytorch_lightning.loggers import WandbLogger lowercase = os.environ.get('''WANDB_PROJECT''' , lowerCAmelCase__ ) lowercase = WandbLogger(name=model.output_dir.name , project=lowerCAmelCase__ ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger lowercase = WandbLogger(name=model.output_dir.name , project=f'hf_{dataset}' ) if args.early_stopping_patience >= 0: lowercase = get_early_stopping_callback(model.val_metric , args.early_stopping_patience ) else: lowercase = False lowercase = args.val_metric == '''loss''' lowercase = generic_train( lowerCAmelCase__ , lowerCAmelCase__ , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback( args.output_dir , model.val_metric , args.save_top_k , lowerCAmelCase__ ) , early_stopping_callback=lowerCAmelCase__ , logger=lowerCAmelCase__ , ) pickle_save(model.hparams , model.output_dir / '''hparams.pkl''' ) if not args.do_predict: return model lowercase = '''''' lowercase = sorted(glob.glob(os.path.join(args.output_dir , '''*.ckpt''' ) , recursive=lowerCAmelCase__ ) ) if checkpoints: lowercase = checkpoints[-1] lowercase = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": lowercase__ :int = argparse.ArgumentParser() lowercase__ :Union[str, Any] = pl.Trainer.add_argparse_args(parser) lowercase__ :Optional[Any] = SummarizationModule.add_model_specific_args(parser, os.getcwd()) lowercase__ :Union[str, Any] = parser.parse_args() main(args)
101
import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor _A = logging.get_logger(__name__) class A ( __UpperCAmelCase ): def __init__( self, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" warnings.warn( '''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use BeitImageProcessor instead.''', UpperCamelCase__, ) super().__init__(*UpperCamelCase__, **UpperCamelCase__ )
278
0
"""simple docstring""" import os import random import sys from . import cryptomath_module as cryptoMath # noqa: N812 from . import rabin_miller as rabinMiller # noqa: N812 def lowercase ( ) ->None: """simple docstring""" print('''Making key files...''' ) make_key_files('''rsa''' , 1_024 ) print('''Key files generation successful.''' ) def lowercase ( _snake_case : int ) ->tuple[tuple[int, int], tuple[int, int]]: """simple docstring""" print('''Generating prime p...''' ) __snake_case : Any = rabinMiller.generate_large_prime(_snake_case ) print('''Generating prime q...''' ) __snake_case : Any = rabinMiller.generate_large_prime(_snake_case ) __snake_case : Optional[Any] = p * q print('''Generating e that is relatively prime to (p - 1) * (q - 1)...''' ) while True: __snake_case : Any = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) ) if cryptoMath.gcd(_snake_case , (p - 1) * (q - 1) ) == 1: break print('''Calculating d that is mod inverse of e...''' ) __snake_case : Optional[Any] = cryptoMath.find_mod_inverse(_snake_case , (p - 1) * (q - 1) ) __snake_case : str = (n, e) __snake_case : List[str] = (n, d) return (public_key, private_key) def lowercase ( _snake_case : str , _snake_case : int ) ->None: """simple docstring""" if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ): print('''\nWARNING:''' ) print( f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. 
\n""" '''Use a different name or delete these files and re-run this program.''' ) sys.exit() __snake_case , __snake_case : List[str] = generate_key(_snake_case ) print(f"""\nWriting public key to file {name}_pubkey.txt...""" ) with open(f"""{name}_pubkey.txt""" , '''w''' ) as out_file: out_file.write(f"""{key_size},{public_key[0]},{public_key[1]}""" ) print(f"""Writing private key to file {name}_privkey.txt...""" ) with open(f"""{name}_privkey.txt""" , '''w''' ) as out_file: out_file.write(f"""{key_size},{private_key[0]},{private_key[1]}""" ) if __name__ == "__main__": main()
102
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() _A = logging.get_logger(__name__) def __UpperCamelCase ( _A ): lowerCAmelCase_ = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: lowerCAmelCase_ = 192 lowerCAmelCase_ = 768 lowerCAmelCase_ = 12 lowerCAmelCase_ = 3 lowerCAmelCase_ = [800, 1333] lowerCAmelCase_ = False elif yolos_name == "yolos_s_dWr": lowerCAmelCase_ = 330 lowerCAmelCase_ = 14 lowerCAmelCase_ = 6 lowerCAmelCase_ = 1320 elif "yolos_s" in yolos_name: lowerCAmelCase_ = 384 lowerCAmelCase_ = 1536 lowerCAmelCase_ = 12 lowerCAmelCase_ = 6 elif "yolos_b" in yolos_name: lowerCAmelCase_ = [800, 1344] lowerCAmelCase_ = 91 lowerCAmelCase_ = '''huggingface/label-files''' lowerCAmelCase_ = '''coco-detection-id2label.json''' lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()} lowerCAmelCase_ = idalabel lowerCAmelCase_ = {v: k for k, v in idalabel.items()} return config def __UpperCamelCase ( _A , _A , _A = False ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCAmelCase_ = state_dict.pop(f"blocks.{i}.attn.qkv.weight" ) lowerCAmelCase_ = state_dict.pop(f"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase_ = in_proj_weight[: config.hidden_size, :] lowerCAmelCase_ = in_proj_bias[: config.hidden_size] lowerCAmelCase_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCAmelCase_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCAmelCase_ = in_proj_weight[-config.hidden_size :, :] lowerCAmelCase_ = 
in_proj_bias[-config.hidden_size :] def __UpperCamelCase ( _A ): if "backbone" in name: lowerCAmelCase_ = name.replace('''backbone''' , '''vit''' ) if "cls_token" in name: lowerCAmelCase_ = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "det_token" in name: lowerCAmelCase_ = name.replace('''det_token''' , '''embeddings.detection_tokens''' ) if "mid_pos_embed" in name: lowerCAmelCase_ = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' ) if "pos_embed" in name: lowerCAmelCase_ = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: lowerCAmelCase_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "blocks" in name: lowerCAmelCase_ = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: lowerCAmelCase_ = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: lowerCAmelCase_ = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: lowerCAmelCase_ = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: lowerCAmelCase_ = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: lowerCAmelCase_ = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: lowerCAmelCase_ = name.replace('''mlp.fc2''' , '''output.dense''' ) if "class_embed" in name: lowerCAmelCase_ = name.replace('''class_embed''' , '''class_labels_classifier''' ) if "bbox_embed" in name: lowerCAmelCase_ = name.replace('''bbox_embed''' , '''bbox_predictor''' ) if "vit.norm" in name: lowerCAmelCase_ = name.replace('''vit.norm''' , '''vit.layernorm''' ) return name def __UpperCamelCase ( _A , _A ): for key in orig_state_dict.copy().keys(): lowerCAmelCase_ = orig_state_dict.pop(_A ) if "qkv" in key: lowerCAmelCase_ = key.split('''.''' ) lowerCAmelCase_ = int(key_split[2] ) lowerCAmelCase_ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: 
lowerCAmelCase_ = val[:dim, :] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[-dim:, :] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[dim : dim * 2] lowerCAmelCase_ = val[-dim:] else: lowerCAmelCase_ = val return orig_state_dict def __UpperCamelCase ( ): lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw ) return im @torch.no_grad() def __UpperCamelCase ( _A , _A , _A , _A = False ): lowerCAmelCase_ = get_yolos_config(_A ) # load original state_dict lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' )['''model'''] # load 🤗 model lowerCAmelCase_ = YolosForObjectDetection(_A ) model.eval() lowerCAmelCase_ = convert_state_dict(_A , _A ) model.load_state_dict(_A ) # Check outputs on an image, prepared by YolosImageProcessor lowerCAmelCase_ = 800 if yolos_name != '''yolos_ti''' else 512 lowerCAmelCase_ = YolosImageProcessor(format='''coco_detection''' , size=_A ) lowerCAmelCase_ = image_processor(images=prepare_img() , return_tensors='''pt''' ) lowerCAmelCase_ = model(**_A ) lowerCAmelCase_ , lowerCAmelCase_ = outputs.logits, outputs.pred_boxes lowerCAmelCase_ , lowerCAmelCase_ = None, None if yolos_name == "yolos_ti": lowerCAmelCase_ = torch.tensor( [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] ) lowerCAmelCase_ = torch.tensor( [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] ) elif yolos_name == "yolos_s_200_pre": lowerCAmelCase_ = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] ) lowerCAmelCase_ = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] ) elif yolos_name == "yolos_s_300_pre": lowerCAmelCase_ = torch.tensor( [[-3_6.2_2_2_0, 
-1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] ) lowerCAmelCase_ = torch.tensor( [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] ) elif yolos_name == "yolos_s_dWr": lowerCAmelCase_ = torch.tensor( [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] ) lowerCAmelCase_ = torch.tensor( [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] ) elif yolos_name == "yolos_base": lowerCAmelCase_ = torch.tensor( [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] ) lowerCAmelCase_ = torch.tensor( [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] ) else: raise ValueError(f"Unknown yolos_name: {yolos_name}" ) assert torch.allclose(logits[0, :3, :3] , _A , atol=1E-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , _A , atol=1E-4 ) Path(_A ).mkdir(exist_ok=_A ) print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_A ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(_A ) if push_to_hub: lowerCAmelCase_ = { '''yolos_ti''': '''yolos-tiny''', '''yolos_s_200_pre''': '''yolos-small''', '''yolos_s_300_pre''': '''yolos-small-300''', '''yolos_s_dWr''': '''yolos-small-dwr''', '''yolos_base''': '''yolos-base''', } print('''Pushing to the hub...''' ) lowerCAmelCase_ = model_mapping[yolos_name] image_processor.push_to_hub(_A , organization='''hustvl''' ) model.push_to_hub(_A , organization='''hustvl''' ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--yolos_name''', default='''yolos_s_200_pre''', type=str, help=( '''Name of the YOLOS model you\'d like to convert. 
Should be one of \'yolos_ti\', \'yolos_s_200_pre\',''' ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.''' ), ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _A = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
278
0
import math def UpperCamelCase( __UpperCamelCase : int = 100 ): lowerCAmelCase_ : Dict = sum(i * i for i in range(1 ,n + 1 ) ) lowerCAmelCase_ : Optional[int] = int(math.pow(sum(range(1 ,n + 1 ) ) ,2 ) ) return square_of_sum - sum_of_squares if __name__ == "__main__": print(F'''{solution() = }''')
103
def __UpperCamelCase ( _A ): if not numbers: return 0 if not isinstance(_A , (list, tuple) ) or not all( isinstance(_A , _A ) for number in numbers ): raise ValueError('''numbers must be an iterable of integers''' ) lowerCAmelCase_ = lowerCAmelCase_ = lowerCAmelCase_ = numbers[0] for i in range(1 , len(_A ) ): # update the maximum and minimum subarray products lowerCAmelCase_ = numbers[i] if number < 0: lowerCAmelCase_ , lowerCAmelCase_ = min_till_now, max_till_now lowerCAmelCase_ = max(_A , max_till_now * number ) lowerCAmelCase_ = min(_A , min_till_now * number ) # update the maximum product found till now lowerCAmelCase_ = max(_A , _A ) return max_prod
278
0
import re
import string

import datasets
import numpy as np


_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to ignore when calculating the exact matches. Note: these regexes are removed from the input data before the changes based on the options below (e.g. ignore_case, ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0
"""

_CITATION = """
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    """Exact-match rate between predictions and references (0.0-100.0)."""

    def _info(self):
        # `datasets.Metric` requires the hooks to be named `_info`/`_compute`;
        # the mangled names in the original broke metric loading.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        # Strip user-supplied regex patterns first, before any normalization.
        if regexes_to_ignore is not None:
            for pattern in regexes_to_ignore:
                predictions = np.array([re.sub(pattern, "", x) for x in predictions])
                references = np.array([re.sub(pattern, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
104
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms

from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def get_focalnet_config(model_name):
    """Build a FocalNetConfig whose architecture matches the checkpoint name."""
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = "large" in model_name or "huge" in model_name
    use_post_layernorm = "large" in model_name or "huge" in model_name
    use_layerscale = "large" in model_name or "huge" in model_name

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # NOTE(review): the original passed `idalabel=`/`labelaid=` which are not
    # FocalNetConfig kwargs; the correct names are id2label/label2id.
    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config


def rename_key(name):
    """Map an original FocalNet checkpoint key to its HF-Transformers name."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name


def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Download an original FocalNet checkpoint, convert it to HF format, verify, and optionally save/push."""
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion against the original preprocessing pipeline
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
278
0
"""T5 model configuration."""
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class T5Config(PretrainedConfig):
    """Configuration for a T5 model; defaults match the t5-small architecture."""

    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        # default = symmetry: decoder mirrors the encoder depth unless overridden
        self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # `feed_forward_proj` is either "<ACT_FN>" or "gated-<ACT_FN>"
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for T5 (seq2seq with optional past key values)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            # With a cache, the decoder consumes a single new token per step.
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
105
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    """Drop fairseq-only checkpoint keys (in place); missing keys are ignored."""
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free Linear output head that shares the embedding weights."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq XGLM `model.pt` and return the equivalent XGLMForCausalLM."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    # fairseq names the stack "decoder"; HF names it "model"
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    # strict=False: the lm_head is rebuilt below from the embedding matrix
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
278
0
"""Lazy-import structure for the NLLB tokenizers (slow + fast)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public names, filled in only when the optional
# backend (sentencepiece / tokenizers) is actually installed.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]


if TYPE_CHECKING:
    # Re-import eagerly for static type checkers only.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
106
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

import json
import tempfile
from pathlib import Path

from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES


mname_tiny = "tiny-wmt19-en-ru"

# Build
# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

# Write the tiny vocab/merges files into a throwaway dir; the tokenizer
# reads them eagerly, so the dir may be deleted afterwards.
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
278
0
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher: print the candidate plaintext for all 26 keys.

    Only uppercase ASCII letters are shifted; every other character is
    copied through unchanged.
    """
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    # wrap around the alphabet
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    """Read a message from stdin, uppercase it, and print all decryptions."""
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
107
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Deduplicate and alphabetically sort one modality's model-doc ToC.

    Entries sharing the same ``local`` page are merged into one; if the
    duplicates carry different titles a ValueError is raised so a human
    can pick the canonical one.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort (case-insensitively by title)
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    """Verify (or, with overwrite=True, fix) the model section of the doc ToC."""
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Each modality (text/vision/audio/...) has its own nested "sections"
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
278
0
"""simple docstring"""


def a__(SCREAMING_SNAKE_CASE: list[int]):
    """Return the maximum product over all contiguous subarrays.

    Args:
        SCREAMING_SNAKE_CASE: a list or tuple of integers.

    Returns:
        The maximum contiguous-subarray product, or 0 for an empty input.

    Raises:
        ValueError: if the input is not a list/tuple of integers.
    """
    if not SCREAMING_SNAKE_CASE:
        return 0
    if not isinstance(SCREAMING_SNAKE_CASE, (list, tuple)) or not all(
        isinstance(number, int) for number in SCREAMING_SNAKE_CASE
    ):
        raise ValueError("numbers must be an iterable of integers")

    # All three trackers start at the first element; the mangled original
    # left min_till_now/max_till_now/max_prod undefined.
    max_till_now = min_till_now = max_prod = SCREAMING_SNAKE_CASE[0]

    for i in range(1, len(SCREAMING_SNAKE_CASE)):
        # update the maximum and minimum subarray products
        number = SCREAMING_SNAKE_CASE[i]
        if number < 0:
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
108
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class UnCLIPSchedulerTest(SchedulerCommonTest):
    """Unit tests for UnCLIPScheduler (variance math and full denoising loops)."""

    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Default scheduler config; keyword overrides are merged on top."""
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                # prev_timestep must precede time_step
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        # not applicable to UnCLIPScheduler
        pass

    def test_add_noise_device(self):
        # not applicable to UnCLIPScheduler
        pass
0
"""simple docstring""" def _snake_case ( UpperCamelCase : int ): if num <= 0: raise ValueError("""Input must be a positive integer""" ) UpperCAmelCase : Any = [True] * (num + 1) UpperCAmelCase : Any = 2 while p * p <= num: if primes[p]: for i in range(p * p , num + 1 , UpperCamelCase ): UpperCAmelCase : int = False p += 1 return [prime for prime in range(2 , num + 1 ) if primes[prime]] if __name__ == "__main__": import doctest doctest.testmod() A: int = int(input("Enter a positive integer: ").strip()) print(prime_sieve_eratosthenes(user_num))
109
"""Convert original SwiftFormer checkpoints to the Hugging Face format.

BUG FIX (review): the obfuscated original declared every helper as
``def __UpperCamelCase(_A, _A, ...)`` — duplicate parameter names are a
SyntaxError — while call sites used the real names (``prepare_img``,
``create_rename_keys``, ``convert_swiftformer_checkpoint``, ...). Names are
restored from those call sites, and dropped ``config.*`` attribute targets
are reinstated.
"""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Conversion and verification run on CPU.
# NOTE(review): the original rebound `_A` (logger, then device); distinct
# names keep the logger usable.
device = torch.device("cpu")


def prepare_img():
    """Download and return the standard COCO cats image used for sanity checks."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    """Return the first five reference logits for the given checkpoint name.

    NOTE(review): falls through (returns None) for unknown names — callers only
    pass one of the four supported checkpoints.
    """
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    """Build ``(old_key, new_key)`` pairs mapping original SwiftFormer weight
    names to the HF ``SwiftFormerForImageClassification`` naming scheme."""
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                # Numbered sub-block: network.<stage>.<block>... ->
                # swiftformer.encoder.network.<stage>.blocks.<block>...
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """Convert an original SwiftFormer checkpoint to HF format, verify its
    logits against reference values, and save the converted model.

    Args:
        swiftformer_name: One of the four supported checkpoint names.
        pytorch_dump_folder_path: Output directory for the converted model.
        original_ckpt: URL or local path of the original checkpoint.
    """
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    # NOTE(review): "preprocessor_config" is kept verbatim from the original;
    # confirm the intended processor repo/path before shipping.
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
278
0
"""simple docstring""" from abc import ABC, abstractmethod from argparse import ArgumentParser class _UpperCAmelCase ( __UpperCAmelCase ): @staticmethod @abstractmethod def A ( A : Optional[int] ) -> Any: raise NotImplementedError() @abstractmethod def A ( self : Union[str, Any] ) -> Optional[Any]: raise NotImplementedError()
33
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Pretrained checkpoint name -> config URL.
# NOTE(review): the original rebound `_A` (logger, then this map); the map
# keeps the original name `_A` since that is what the final rebinding left it
# as, while the logger gets a distinct name so it is not clobbered.
_A = {
    '''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class A(PretrainedConfig):
    """Configuration for a ViT model (stores the encoder hyper-parameters).

    BUG FIX (review): the original declared all fourteen ``__init__``
    parameters under the single name ``UpperCamelCase__`` (duplicate parameter
    names are a SyntaxError) while the body read ``hidden_size``,
    ``num_hidden_layers``, ... — the real parameter names are restored from
    those body references, and the dropped ``self.`` assignment targets are
    reinstated. The undefined base ``__UpperCAmelCase`` is replaced with the
    imported ``PretrainedConfig``.
    """

    __snake_case = 'vit'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        """Store the provided hyper-parameters; extra kwargs go to the base class."""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class A(OnnxConfig):
    """ONNX export configuration for ViT.

    NOTE(review): reuses the name ``A``, shadowing the config class above, and
    both properties below share the name ``SCREAMING_SNAKE_CASE__`` (the second
    shadows the first) — both quirks are preserved from the original file. The
    undefined base ``__UpperCAmelCase`` is replaced with the imported
    ``OnnxConfig``.
    """

    # Minimum ONNX opset-support marker carried by this config.
    __snake_case = version.parse('1.11')

    @property
    def SCREAMING_SNAKE_CASE__(self):
        # ONNX input spec: `pixel_values` with a dynamic batch axis.
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ]
        )

    @property
    def SCREAMING_SNAKE_CASE__(self):
        # Absolute tolerance used when validating the ONNX export.
        return 1E-4
278
0
import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class A_ ( unittest.TestCase ): _UpperCAmelCase : str = MODEL_FOR_CAUSAL_LM_MAPPING _UpperCAmelCase : int = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def lowerCAmelCase ( self : List[str]): __lowerCamelCase : Optional[Any] = pipeline(task='text-generation' ,model='sshleifer/tiny-ctrl' ,framework='pt') # Using `do_sample=False` to force deterministic output __lowerCamelCase : List[Any] = text_generator('This is a test' ,do_sample=UpperCamelCase__) self.assertEqual( UpperCamelCase__ ,[ { 'generated_text': ( 'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.' ' oscope. FiliFili@@' ) } ] ,) __lowerCamelCase : int = text_generator(['This is a test', 'This is a second test']) self.assertEqual( UpperCamelCase__ ,[ [ { 'generated_text': ( 'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.' ' oscope. FiliFili@@' ) } ], [ { 'generated_text': ( 'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy' ' oscope. oscope. 
FiliFili@@' ) } ], ] ,) __lowerCamelCase : int = text_generator('This is a test' ,do_sample=UpperCamelCase__ ,num_return_sequences=2 ,return_tensors=UpperCamelCase__) self.assertEqual( UpperCamelCase__ ,[ {'generated_token_ids': ANY(UpperCamelCase__)}, {'generated_token_ids': ANY(UpperCamelCase__)}, ] ,) __lowerCamelCase : Tuple = text_generator.model.config.eos_token_id __lowerCamelCase : Any = '<pad>' __lowerCamelCase : List[str] = text_generator( ['This is a test', 'This is a second test'] ,do_sample=UpperCamelCase__ ,num_return_sequences=2 ,batch_size=2 ,return_tensors=UpperCamelCase__ ,) self.assertEqual( UpperCamelCase__ ,[ [ {'generated_token_ids': ANY(UpperCamelCase__)}, {'generated_token_ids': ANY(UpperCamelCase__)}, ], [ {'generated_token_ids': ANY(UpperCamelCase__)}, {'generated_token_ids': ANY(UpperCamelCase__)}, ], ] ,) @require_tf def lowerCAmelCase ( self : Tuple): __lowerCamelCase : Dict = pipeline(task='text-generation' ,model='sshleifer/tiny-ctrl' ,framework='tf') # Using `do_sample=False` to force deterministic output __lowerCamelCase : Optional[int] = text_generator('This is a test' ,do_sample=UpperCamelCase__) self.assertEqual( UpperCamelCase__ ,[ { 'generated_text': ( 'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵' ' please,' ) } ] ,) __lowerCamelCase : Union[str, Any] = text_generator(['This is a test', 'This is a second test'] ,do_sample=UpperCamelCase__) self.assertEqual( UpperCamelCase__ ,[ [ { 'generated_text': ( 'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵' ' please,' ) } ], [ { 'generated_text': ( 'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes' ' Cannes 閲閲Cannes Cannes Cannes 攵 please,' ) } ], ] ,) def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : List[str]): __lowerCamelCase : Optional[Any] = 
TextGenerationPipeline(model=UpperCamelCase__ ,tokenizer=UpperCamelCase__) return text_generator, ["This is a test", "Another test"] def lowerCAmelCase ( self : str): __lowerCamelCase : Tuple = 'Hello I believe in' __lowerCamelCase : int = pipeline('text-generation' ,model='hf-internal-testing/tiny-random-gpt2') __lowerCamelCase : List[Any] = text_generator(UpperCamelCase__) self.assertEqual( UpperCamelCase__ ,[{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] ,) __lowerCamelCase : Optional[int] = text_generator(UpperCamelCase__ ,stop_sequence=' fe') self.assertEqual(UpperCamelCase__ ,[{'generated_text': 'Hello I believe in fe'}]) def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Union[str, Any]): __lowerCamelCase : Optional[int] = text_generator.model __lowerCamelCase : List[str] = text_generator.tokenizer __lowerCamelCase : int = text_generator('This is a test') self.assertEqual(UpperCamelCase__ ,[{'generated_text': ANY(UpperCamelCase__)}]) self.assertTrue(outputs[0]['generated_text'].startswith('This is a test')) __lowerCamelCase : Optional[Any] = text_generator('This is a test' ,return_full_text=UpperCamelCase__) self.assertEqual(UpperCamelCase__ ,[{'generated_text': ANY(UpperCamelCase__)}]) self.assertNotIn('This is a test' ,outputs[0]['generated_text']) __lowerCamelCase : Union[str, Any] = pipeline(task='text-generation' ,model=UpperCamelCase__ ,tokenizer=UpperCamelCase__ ,return_full_text=UpperCamelCase__) __lowerCamelCase : List[Any] = text_generator('This is a test') self.assertEqual(UpperCamelCase__ ,[{'generated_text': ANY(UpperCamelCase__)}]) self.assertNotIn('This is a test' ,outputs[0]['generated_text']) __lowerCamelCase : str = text_generator('This is a test' ,return_full_text=UpperCamelCase__) self.assertEqual(UpperCamelCase__ ,[{'generated_text': ANY(UpperCamelCase__)}]) self.assertTrue(outputs[0]['generated_text'].startswith('This is a test')) __lowerCamelCase : 
Optional[int] = text_generator(['This is great !', 'Something else'] ,num_return_sequences=2 ,do_sample=UpperCamelCase__) self.assertEqual( UpperCamelCase__ ,[ [{'generated_text': ANY(UpperCamelCase__)}, {'generated_text': ANY(UpperCamelCase__)}], [{'generated_text': ANY(UpperCamelCase__)}, {'generated_text': ANY(UpperCamelCase__)}], ] ,) if text_generator.tokenizer.pad_token is not None: __lowerCamelCase : List[Any] = text_generator( ['This is great !', 'Something else'] ,num_return_sequences=2 ,batch_size=2 ,do_sample=UpperCamelCase__) self.assertEqual( UpperCamelCase__ ,[ [{'generated_text': ANY(UpperCamelCase__)}, {'generated_text': ANY(UpperCamelCase__)}], [{'generated_text': ANY(UpperCamelCase__)}, {'generated_text': ANY(UpperCamelCase__)}], ] ,) with self.assertRaises(UpperCamelCase__): __lowerCamelCase : Any = text_generator('test' ,return_full_text=UpperCamelCase__ ,return_text=UpperCamelCase__) with self.assertRaises(UpperCamelCase__): __lowerCamelCase : int = text_generator('test' ,return_full_text=UpperCamelCase__ ,return_tensors=UpperCamelCase__) with self.assertRaises(UpperCamelCase__): __lowerCamelCase : Optional[Any] = text_generator('test' ,return_text=UpperCamelCase__ ,return_tensors=UpperCamelCase__) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): __lowerCamelCase : str = text_generator('') self.assertEqual(UpperCamelCase__ ,[{'generated_text': ANY(UpperCamelCase__)}]) else: with self.assertRaises((ValueError, AssertionError)): __lowerCamelCase : Optional[int] = text_generator('') if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. 
return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. __lowerCamelCase : int = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM'] if ( tokenizer.model_max_length < 1_0_0_0_0 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)): text_generator('This is a test' * 5_0_0 ,max_new_tokens=2_0) __lowerCamelCase : int = text_generator('This is a test' * 5_0_0 ,handle_long_generation='hole' ,max_new_tokens=2_0) # Hole strategy cannot work with self.assertRaises(UpperCamelCase__): text_generator( 'This is a test' * 5_0_0 ,handle_long_generation='hole' ,max_new_tokens=tokenizer.model_max_length + 1_0 ,) @require_torch @require_accelerate @require_torch_gpu def lowerCAmelCase ( self : Any): import torch # Classic `model_kwargs` __lowerCamelCase : Dict = pipeline( model='hf-internal-testing/tiny-random-bloom' ,model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloataa} ,) self.assertEqual(pipe.model.device ,torch.device(0)) self.assertEqual(pipe.model.lm_head.weight.dtype ,torch.bfloataa) __lowerCamelCase : int = pipe('This is a test') self.assertEqual( UpperCamelCase__ ,[ { 'generated_text': ( 'This is a test test test test test test test test test test test test test test test test' ' test' ) } ] ,) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) 
__lowerCamelCase : List[str] = pipeline(model='hf-internal-testing/tiny-random-bloom' ,device_map='auto' ,torch_dtype=torch.bfloataa) self.assertEqual(pipe.model.device ,torch.device(0)) self.assertEqual(pipe.model.lm_head.weight.dtype ,torch.bfloataa) __lowerCamelCase : Any = pipe('This is a test') self.assertEqual( UpperCamelCase__ ,[ { 'generated_text': ( 'This is a test test test test test test test test test test test test test test test test' ' test' ) } ] ,) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 __lowerCamelCase : Union[str, Any] = pipeline(model='hf-internal-testing/tiny-random-bloom' ,device_map='auto') self.assertEqual(pipe.model.device ,torch.device(0)) self.assertEqual(pipe.model.lm_head.weight.dtype ,torch.floataa) __lowerCamelCase : Tuple = pipe('This is a test') self.assertEqual( UpperCamelCase__ ,[ { 'generated_text': ( 'This is a test test test test test test test test test test test test test test test test' ' test' ) } ] ,) @require_torch @require_torch_gpu def lowerCAmelCase ( self : Any): import torch __lowerCamelCase : Any = pipeline(model='hf-internal-testing/tiny-random-bloom' ,device=0 ,torch_dtype=torch.floataa) pipe('This is a test') @require_torch @require_accelerate @require_torch_gpu def lowerCAmelCase ( self : int): import torch __lowerCamelCase : List[Any] = pipeline(model='hf-internal-testing/tiny-random-bloom' ,device_map='auto' ,torch_dtype=torch.floataa) pipe('This is a test' ,do_sample=UpperCamelCase__ ,top_p=0.5) def lowerCAmelCase ( self : Tuple): __lowerCamelCase : Union[str, Any] = 'Hello world' __lowerCamelCase : List[Any] = pipeline('text-generation' ,model='hf-internal-testing/tiny-random-gpt2') if text_generator.model.framework == "tf": __lowerCamelCase : Optional[int] = logging.get_logger('transformers.generation.tf_utils') else: __lowerCamelCase : Optional[Any] = logging.get_logger('transformers.generation.utils') 
__lowerCamelCase : List[Any] = 'Both `max_new_tokens`' # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(UpperCamelCase__) as cl: __lowerCamelCase : Dict = text_generator(UpperCamelCase__ ,max_length=1_0 ,max_new_tokens=1) self.assertIn(UpperCamelCase__ ,cl.out) # The user only sets one -> no warning with CaptureLogger(UpperCamelCase__) as cl: __lowerCamelCase : Tuple = text_generator(UpperCamelCase__ ,max_new_tokens=1) self.assertNotIn(UpperCamelCase__ ,cl.out) with CaptureLogger(UpperCamelCase__) as cl: __lowerCamelCase : Union[str, Any] = text_generator(UpperCamelCase__ ,max_length=1_0) self.assertNotIn(UpperCamelCase__ ,cl.out)
73
"""Tests for JSON dataset I/O (`JsonDatasetReader` / `JsonDatasetWriter`).

NOTE(review): this module's identifiers were mangled by an obfuscation pass —
every module-level function is named ``__UpperCamelCase`` with duplicate
``_A`` parameters (a SyntaxError as written), and bodies reference the
intended pytest-fixture names (``tmp_path``, ``jsonl_path``, ``features``,
``dataset``, ...) that no longer exist as parameters. The code is reproduced
unchanged; only comments/docstrings were added. Indentation is reconstructed,
since the source carried none — TODO confirm against the upstream file.
"""
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


# Shared assertions for a 4-row, 3-column test dataset.
def __UpperCamelCase ( _A , _A ):
    assert isinstance(_A , _A )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


# Reading a jsonl file with/without keeping it in memory.
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __UpperCamelCase ( _A , _A , _A ):
    lowerCAmelCase_ = tmp_path / '''cache'''
    lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A , keep_in_memory=_A ).read()
    # NOTE(review): `_check_json_dataset` is the intended name of the first
    # helper above (renamed to __UpperCamelCase by the obfuscation).
    _check_json_dataset(_A , _A )


# Reading with an explicit `features` schema (or None for the default).
@pytest.mark.parametrize(
    '''features''' ,
    [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ] ,
)
def __UpperCamelCase ( _A , _A , _A ):
    lowerCAmelCase_ = tmp_path / '''cache'''
    lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    lowerCAmelCase_ = features.copy() if features else default_expected_features
    lowerCAmelCase_ = (
        Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
    )
    lowerCAmelCase_ = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
    _check_json_dataset(_A , _A )


# Column order should follow the provided features, not the file.
@pytest.mark.parametrize(
    '''features''' ,
    [
        None,
        {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
    ] ,
)
def __UpperCamelCase ( _A , _A , _A ):
    lowerCAmelCase_ = tmp_path / '''cache'''
    lowerCAmelCase_ = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
    lowerCAmelCase_ = features.copy() if features else default_expected_features
    lowerCAmelCase_ = (
        Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
    )
    lowerCAmelCase_ = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
    assert isinstance(_A , _A )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


# Reading a col_3/col_1/col_2-ordered file with a schema that requests yet
# another column order (col_2/col_3/col_1).
def __UpperCamelCase ( _A , _A ):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    lowerCAmelCase_ = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
    lowerCAmelCase_ = features.copy()
    lowerCAmelCase_ = (
        Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
    )
    lowerCAmelCase_ = tmp_path / '''cache'''
    lowerCAmelCase_ = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
    assert isinstance(_A , _A )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


# `split` argument: dataset.split should reflect it (default "train").
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __UpperCamelCase ( _A , _A , _A ):
    lowerCAmelCase_ = tmp_path / '''cache'''
    lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A , split=_A ).read()
    _check_json_dataset(_A , _A )
    assert dataset.split == split if split else "train"


# The path may be a single string or a list of strings.
@pytest.mark.parametrize('''path_type''' , [str, list] )
def __UpperCamelCase ( _A , _A , _A ):
    if issubclass(_A , _A ):
        lowerCAmelCase_ = jsonl_path
    elif issubclass(_A , _A ):
        lowerCAmelCase_ = [jsonl_path]
    lowerCAmelCase_ = tmp_path / '''cache'''
    lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A ).read()
    _check_json_dataset(_A , _A )


# Shared assertions for a DatasetDict whose splits are 4-row, 3-column datasets.
def __UpperCamelCase ( _A , _A , _A=("train",) ):
    assert isinstance(_A , _A )
    for split in splits:
        lowerCAmelCase_ = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


# DatasetDict reading with/without keep_in_memory.
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def __UpperCamelCase ( _A , _A , _A ):
    lowerCAmelCase_ = tmp_path / '''cache'''
    lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        lowerCAmelCase_ = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=_A , keep_in_memory=_A ).read()
    _check_json_datasetdict(_A , _A )


# DatasetDict reading with an explicit `features` schema.
@pytest.mark.parametrize(
    '''features''' ,
    [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ] ,
)
def __UpperCamelCase ( _A , _A , _A ):
    lowerCAmelCase_ = tmp_path / '''cache'''
    lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    lowerCAmelCase_ = features.copy() if features else default_expected_features
    lowerCAmelCase_ = (
        Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
    )
    lowerCAmelCase_ = JsonDatasetReader({'''train''': jsonl_path} , features=_A , cache_dir=_A ).read()
    _check_json_datasetdict(_A , _A )


# DatasetDict reading with/without an explicit split mapping.
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def __UpperCamelCase ( _A , _A , _A ):
    if split:
        lowerCAmelCase_ = {split: jsonl_path}
    else:
        lowerCAmelCase_ = '''train'''
        lowerCAmelCase_ = {'''train''': jsonl_path, '''test''': jsonl_path}
    lowerCAmelCase_ = tmp_path / '''cache'''
    lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A ).read()
    _check_json_datasetdict(_A , _A , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )


# Helper: parse an entire JSON buffer (intended name: load_json).
def __UpperCamelCase ( _A ):
    return json.load(_A )


# Helper: parse a buffer of JSON lines (intended name: load_json_lines).
# NOTE(review): references `buffer` rather than its parameter — obfuscation
# artifact preserved as-is.
def __UpperCamelCase ( _A ):
    return [json.loads(_A ) for line in buffer]


class A :
    # NOTE(review): every method below is named SCREAMING_SNAKE_CASE__ with
    # duplicate UpperCamelCase__ parameters (SyntaxError as written); the
    # parametrize decorators reference the intended helper names
    # load_json_lines / load_json, which no longer exist under those names.

    @pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)] )
    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
        """Writer round-trip: JSON lines vs a single JSON document."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__ ).write()
            buffer.seek(0 )
            lowerCAmelCase_ = load_json_function(UpperCamelCase__ )
        assert isinstance(UpperCamelCase__, UpperCamelCase__ )
        assert isinstance(exported_content[0], UpperCamelCase__ )
        assert len(UpperCamelCase__ ) == 10

    @pytest.mark.parametrize(
        '''orient, container, keys, len_at''',
        [
            ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
            ('''split''', dict, {'''columns''', '''data'''}, '''data'''),
            ('''index''', dict, set('''0123456789''' ), None),
            ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
            ('''values''', list, None, None),
            ('''table''', dict, {'''schema''', '''data'''}, '''data'''),
        ],
    )
    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
        """Writer round-trip for each pandas-style `orient` value."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__, orient=UpperCamelCase__ ).write()
            buffer.seek(0 )
            lowerCAmelCase_ = load_json(UpperCamelCase__ )
        assert isinstance(UpperCamelCase__, UpperCamelCase__ )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(UpperCamelCase__, '''keys''' ) and not hasattr(exported_content[0], '''keys''' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(UpperCamelCase__ ) == 10

    @pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)] )
    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
        """Same lines/document round-trip, written with num_proc=2."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__, num_proc=2 ).write()
            buffer.seek(0 )
            lowerCAmelCase_ = load_json_function(UpperCamelCase__ )
        assert isinstance(UpperCamelCase__, UpperCamelCase__ )
        assert isinstance(exported_content[0], UpperCamelCase__ )
        assert len(UpperCamelCase__ ) == 10

    @pytest.mark.parametrize(
        '''orient, container, keys, len_at''',
        [
            ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
            ('''split''', dict, {'''columns''', '''data'''}, '''data'''),
            ('''index''', dict, set('''0123456789''' ), None),
            ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
            ('''values''', list, None, None),
            ('''table''', dict, {'''schema''', '''data'''}, '''data'''),
        ],
    )
    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
        """Same per-`orient` round-trip, written with num_proc=2."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__, orient=UpperCamelCase__, num_proc=2 ).write()
            buffer.seek(0 )
            lowerCAmelCase_ = load_json(UpperCamelCase__ )
        assert isinstance(UpperCamelCase__, UpperCamelCase__ )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(UpperCamelCase__, '''keys''' ) and not hasattr(exported_content[0], '''keys''' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(UpperCamelCase__ ) == 10

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
        """num_proc=0 must raise."""
        with pytest.raises(UpperCamelCase__ ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, num_proc=0 )

    @pytest.mark.parametrize('''compression, extension''', [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
        """Compressed output must round-trip against the reference file."""
        lowerCAmelCase_ = tmp_path_factory.mktemp('''data''' ) / f"test.json.{extension}"
        lowerCAmelCase_ = str(shared_datadir / f"test_file.json.{extension}" )
        JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, compression=UpperCamelCase__ ).write()
        with fsspec.open(UpperCamelCase__, '''rb''', compression='''infer''' ) as f:
            lowerCAmelCase_ = f.read()
        with fsspec.open(UpperCamelCase__, '''rb''', compression='''infer''' ) as f:
            lowerCAmelCase_ = f.read()
        assert exported_content == original_content
278
0
from collections import defaultdict


class AssignmentUsingBitmask:
    """Count the ways N tasks can be distributed among M persons.

    Each person must receive exactly one task they are able to perform, and a
    task can be given to at most one person.  A bitmask over persons tracks who
    has already been assigned; results are memoised in a DP table.
    """

    def __init__(self, task_performed, total):
        """``task_performed[p]`` lists the task numbers person ``p`` can do;
        ``total`` is the total number of tasks (N)."""
        # total no of tasks (N)
        self.total_tasks = total

        # DP table of dimension (2^M) x (N+1); -1 marks "not yet computed"
        self.dp = [
            [-1 for _ in range(total + 1)]
            for _ in range(2 ** len(task_performed))
        ]

        # stores the list of persons for each task
        self.task = defaultdict(list)

        # final_mask is used to check if all persons are included by setting
        # all bits to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        """Return the number of valid assignments of tasks ``task_no``..N,
        given that ``mask`` encodes the persons already holding a task."""
        # if all persons are assigned a task, there is exactly one way left
        if mask == self.final_mask:
            return 1
        # if not everyone gets a task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if this case was already considered, reuse the memoised answer
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the task to each possible person and recursively
        # count arrangements for the remaining tasks
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task, skip
                if mask & (1 << p):
                    continue
                # assign this task to p and recurse with the updated mask
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value before returning
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        """Fill the person list for every task, then count all assignments."""
        for person in range(len(task_performed)):
            for task_no in task_performed[person]:
                self.task[task_no].append(person)

        # final answer is stored in dp[0][1]; task numbering starts at 1
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
180
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union

import torch

from ..utils import BaseOutput


# File name under which a scheduler's configuration is serialized.
_A = '''scheduler_config.json'''


# NOTE(review): the base name `__UpperCAmelCase` is not defined in this file
# (presumably it should be `Enum`, imported above) — confirm.
# NOTE(review): the three classes below are all named `A`, so each definition
# shadows the previous one; only the last `A` survives at module level.
# NOTE(review): every attribute below is named `__snake_case`, so each
# assignment rebinds the same name — the intended distinct enum member /
# attribute names could not be recovered from this file.
class A ( __UpperCAmelCase ):
    # Enumeration of the schedulers this module knows about (values 1..14).
    __snake_case = 1
    __snake_case = 2
    __snake_case = 3
    __snake_case = 4
    __snake_case = 5
    __snake_case = 6
    __snake_case = 7
    __snake_case = 8
    __snake_case = 9
    __snake_case = 10
    __snake_case = 11
    __snake_case = 12
    __snake_case = 13
    __snake_case = 14


@dataclass
class A ( __UpperCAmelCase ):
    # Output container for a scheduler step; `42` stands in for the original
    # (obfuscated) type-annotation placeholder.
    __snake_case = 42


class A :
    # Mixin providing config-based save/load helpers for schedulers.
    # NOTE(review): `SCHEDULER_CONFIG_NAME` is referenced here but the constant
    # above was bound to `_A` — one of the two names is wrong; confirm.
    __snake_case = SCHEDULER_CONFIG_NAME
    __snake_case = []
    __snake_case = True

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__=False, **UpperCamelCase__, ):
        """Instantiate a scheduler from a saved configuration.

        Loads the config dict (plus unused kwargs and commit hash) via the
        inherited ``load_config`` and builds the instance with ``from_config``.
        """
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = cls.load_config(
            pretrained_model_name_or_path=UpperCamelCase__, subfolder=UpperCamelCase__, return_unused_kwargs=UpperCamelCase__, return_commit_hash=UpperCamelCase__, **UpperCamelCase__, )
        return cls.from_config(UpperCamelCase__, return_unused_kwargs=UpperCamelCase__, **UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = False, **UpperCamelCase__ ):
        """Save the scheduler configuration, optionally pushing to the Hub."""
        self.save_config(save_directory=UpperCamelCase__, push_to_hub=UpperCamelCase__, **UpperCamelCase__ )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Schedulers that are compatible with this one (delegates below)."""
        return self._get_compatibles()

    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls ):
        """Resolve the class's compatibility list into actual classes.

        Looks each compatible class name up on the package's root module and
        keeps only those that exist there.
        """
        lowerCAmelCase_ = list(set([cls.__name__] + cls._compatibles ) )
        # root package module, e.g. `diffusers` for `diffusers.schedulers.x`
        lowerCAmelCase_ = importlib.import_module(__name__.split('''.''' )[0] )
        lowerCAmelCase_ = [
            getattr(UpperCamelCase__, UpperCamelCase__ ) for c in compatible_classes_str if hasattr(UpperCamelCase__, UpperCamelCase__ )
        ]
        return compatible_classes
278
0
# Base64 alphabet per RFC 4648: index i maps to the i-th character.
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encode ``data`` to Base64 (RFC 4648) and return it as bytes.

    Raises:
        TypeError: if ``data`` is not a bytes-like object.
    """
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    # The message as one long string of bits, 8 per input byte.
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later; 2 missing bits -> one '='.
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to
        # make its length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data) -> bytes:
    """Decode Base64-encoded ``encoded_data`` (str or bytes) to raw bytes.

    Raises:
        TypeError: if the argument is neither ``str`` nor ``bytes``.
        ValueError: if a bytes argument contains non-ASCII characters.
        AssertionError: on invalid Base64 characters or incorrect padding.
    """
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one, and drop the padding bits (2 per '=')
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    # Re-group the bit string into bytes.
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded_data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
101
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device

enable_full_determinism()


# NOTE(review): both test classes below are named `A`, so the second definition
# shadows the first at module level; the obfuscation also broke local names
# inside the methods (values are assigned to `lowerCAmelCase_` but read back
# under names like `model`, `pipe`, `image`) — confirm the intended names.
class A ( unittest.TestCase ):
    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Build a small, seeded UNet2D model for the fast pipeline test."""
        torch.manual_seed(0 )
        lowerCAmelCase_ = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''), )
        return model

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Run the KarrasVe pipeline for 2 steps and compare a 3x3 corner
        slice of the output image against hard-coded expected values, both for
        the dict and the tuple return path."""
        lowerCAmelCase_ = self.dummy_uncond_unet
        lowerCAmelCase_ = KarrasVeScheduler()

        lowerCAmelCase_ = KarrasVePipeline(unet=UpperCamelCase__, scheduler=UpperCamelCase__ )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )

        lowerCAmelCase_ = torch.manual_seed(0 )
        lowerCAmelCase_ = pipe(num_inference_steps=2, generator=UpperCamelCase__, output_type='''numpy''' ).images

        # Same seed again so the tuple-return path must produce the same image.
        lowerCAmelCase_ = torch.manual_seed(0 )
        lowerCAmelCase_ = pipe(num_inference_steps=2, generator=UpperCamelCase__, output_type='''numpy''', return_dict=UpperCamelCase__ )[0]

        lowerCAmelCase_ = image[0, -3:, -3:, -1]
        lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        lowerCAmelCase_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2


@slow
@require_torch
class A ( unittest.TestCase ):
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Slow integration test: run the pipeline with pretrained weights for
        20 steps and compare a corner slice against reference values."""
        lowerCAmelCase_ = '''google/ncsnpp-celebahq-256'''
        lowerCAmelCase_ = UNetaDModel.from_pretrained(UpperCamelCase__ )
        lowerCAmelCase_ = KarrasVeScheduler()

        lowerCAmelCase_ = KarrasVePipeline(unet=UpperCamelCase__, scheduler=UpperCamelCase__ )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )

        lowerCAmelCase_ = torch.manual_seed(0 )
        lowerCAmelCase_ = pipe(num_inference_steps=20, generator=UpperCamelCase__, output_type='''numpy''' ).images

        lowerCAmelCase_ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        lowerCAmelCase_ = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
278
0
from __future__ import annotations

# All solved board configurations collected by `solve`.
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen placed at (row, column) is not attacked.

    Checks the queen's row, column and both upward diagonals (rows below
    `row` are still empty while solving top-down).
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # upper-left diagonal
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # upper-right diagonal
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row via backtracking, recording every solution.

    NOTE: the same `board` object is appended to `solution` for each hit (and
    later mutated), so only `len(solution)` is meaningful afterwards; each
    configuration is printed at the moment it is found.
    """
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0  # backtrack
    return False


def printboard(board: list[list[int]]) -> None:
    """Print the board: 'Q' marks a queen, '.' an empty square."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
296
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    # The UnCLIP pipelines need both torch and transformers>=4.25.0.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to dummy placeholder objects that raise an informative error
    # when instantiated without the required backends.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    # Real implementations, only importable when the dependencies are present.
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
278
0
"""simple docstring""" import argparse import hashlib # hashlib is only used inside the Test class import struct class _UpperCAmelCase : def __init__( self : Any , _lowercase : Dict ): __UpperCAmelCase = data __UpperCAmelCase = [0X6_7_4_5_2_3_0_1, 0XE_F_C_D_A_B_8_9, 0X9_8_B_A_D_C_F_E, 0X1_0_3_2_5_4_7_6, 0XC_3_D_2_E_1_F_0] @staticmethod def a ( _lowercase : Union[str, Any] , _lowercase : int ): return ((n << b) | (n >> (32 - b))) & 0XF_F_F_F_F_F_F_F def a ( self : List[Any] ): __UpperCAmelCase = b'''\x80''' + b'''\x00''' * (63 - (len(self.data ) + 8) % 64) __UpperCAmelCase = self.data + padding + struct.pack('''>Q''' , 8 * len(self.data ) ) return padded_data def a ( self : Dict ): return [ self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 ) ] def a ( self : str , _lowercase : List[str] ): __UpperCAmelCase = list(struct.unpack('''>16L''' , UpperCamelCase__ ) ) + [0] * 64 for i in range(16 , 80 ): __UpperCAmelCase = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 ) return w def a ( self : int ): __UpperCAmelCase = self.padding() __UpperCAmelCase = self.split_blocks() for block in self.blocks: __UpperCAmelCase = self.expand_block(UpperCamelCase__ ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self.h for i in range(0 , 80 ): if 0 <= i < 20: __UpperCAmelCase = (b & c) | ((~b) & d) __UpperCAmelCase = 0X5_A_8_2_7_9_9_9 elif 20 <= i < 40: __UpperCAmelCase = b ^ c ^ d __UpperCAmelCase = 0X6_E_D_9_E_B_A_1 elif 40 <= i < 60: __UpperCAmelCase = (b & c) | (b & d) | (c & d) __UpperCAmelCase = 0X8_F_1_B_B_C_D_C elif 60 <= i < 80: __UpperCAmelCase = b ^ c ^ d __UpperCAmelCase = 0XC_A_6_2_C_1_D_6 __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = ( self.rotate(UpperCamelCase__ , 5 ) + f + e + k + expanded_block[i] & 0XF_F_F_F_F_F_F_F, a, self.rotate(UpperCamelCase__ , 30 ), c, d, ) __UpperCAmelCase = ( self.h[0] + a & 0XF_F_F_F_F_F_F_F, self.h[1] + b 
& 0XF_F_F_F_F_F_F_F, self.h[2] + c & 0XF_F_F_F_F_F_F_F, self.h[3] + d & 0XF_F_F_F_F_F_F_F, self.h[4] + e & 0XF_F_F_F_F_F_F_F, ) return ("{:08x}" * 5).format(*self.h ) def lowercase__ ( ): __UpperCAmelCase = b'''Test String''' assert SHAaHash(_A ).final_hash() == hashlib.shaa(_A ).hexdigest() # noqa: S324 def lowercase__ ( ): __UpperCAmelCase = argparse.ArgumentParser(description='''Process some strings or files''' ) parser.add_argument( '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , ) parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''' ) __UpperCAmelCase = parser.parse_args() __UpperCAmelCase = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file , '''rb''' ) as f: __UpperCAmelCase = f.read() else: __UpperCAmelCase = bytes(_A , '''utf-8''' ) print(SHAaHash(_A ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
332
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits=3):
    """Build an n-qubit Quantum Fourier Transform circuit, simulate it with
    10000 shots, and return the measurement counts.

    Args:
        number_of_qubits: number of qubits in the circuit (1..10).

    Raises:
        TypeError: if the argument is a string.
        ValueError: if it is not a positive exact integer, or exceeds 10.
    """
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(number_of_qubits):
        # Hadamard on the current most-significant remaining qubit...
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        # ...followed by controlled phase rotations from the lower qubits.
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # Reverse the qubit order, as required by the QFT definition.
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
    )
278
0
"""simple docstring""" import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse("""3.8"""): import importlib_metadata else: import importlib.metadata as importlib_metadata def UpperCamelCase_ ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str=False ) -> Union[str, Any]: """simple docstring""" try: lowerCAmelCase_ : List[str] = os.environ[key] except KeyError: # KEY isn't set, default to `default`. lowerCAmelCase_ : Any = default else: # KEY is set, convert it to True or False. try: lowerCAmelCase_ : List[Any] = strtobool(_A ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f"If set, {key} must be yes or no." 
) return _value lowercase__ : Tuple = parse_flag_from_env("""RUN_SLOW""", default=False) lowercase__ : Union[str, Any] = parse_flag_from_env("""RUN_REMOTE""", default=False) lowercase__ : str = parse_flag_from_env("""RUN_LOCAL""", default=True) lowercase__ : List[str] = parse_flag_from_env("""RUN_PACKAGED""", default=True) # Compression lowercase__ : Optional[Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""") lowercase__ : Any = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""") lowercase__ : Union[str, Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""") # Audio lowercase__ : List[Any] = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""), reason="""test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; """, ) # Beam lowercase__ : Dict = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""), reason="""test requires apache-beam and a compatible dill version""", ) # Dill-cloudpickle compatibility lowercase__ : str = pytest.mark.skipif( config.DILL_VERSION <= version.parse("""0.3.2"""), reason="""test requires dill>0.3.2 for cloudpickle compatibility""", ) # Windows lowercase__ : Optional[Any] = pytest.mark.skipif( sys.platform == """win32""", reason="""test should not be run on Windows""", ) def UpperCamelCase_ ( lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]: """simple docstring""" try: import faiss # noqa except ImportError: lowerCAmelCase_ : Optional[Any] = unittest.skip('test requires faiss' )(_A ) return test_case def UpperCamelCase_ ( lowerCAmelCase__ : str ) -> str: """simple docstring""" try: import regex # noqa except ImportError: lowerCAmelCase_ : Optional[int] = unittest.skip('test requires regex' )(_A ) return test_case def UpperCamelCase_ ( 
lowerCAmelCase__ : str ) -> Tuple: """simple docstring""" try: import elasticsearch # noqa except ImportError: lowerCAmelCase_ : List[str] = unittest.skip('test requires elasticsearch' )(_A ) return test_case def UpperCamelCase_ ( lowerCAmelCase__ : List[Any] ) -> Optional[int]: """simple docstring""" try: import sqlalchemy # noqa except ImportError: lowerCAmelCase_ : str = unittest.skip('test requires sqlalchemy' )(_A ) return test_case def UpperCamelCase_ ( lowerCAmelCase__ : Optional[int] ) -> int: """simple docstring""" if not config.TORCH_AVAILABLE: lowerCAmelCase_ : Tuple = unittest.skip('test requires PyTorch' )(_A ) return test_case def UpperCamelCase_ ( lowerCAmelCase__ : str ) -> List[Any]: """simple docstring""" if not config.TF_AVAILABLE: lowerCAmelCase_ : Optional[Any] = unittest.skip('test requires TensorFlow' )(_A ) return test_case def UpperCamelCase_ ( lowerCAmelCase__ : Dict ) -> Any: """simple docstring""" if not config.JAX_AVAILABLE: lowerCAmelCase_ : int = unittest.skip('test requires JAX' )(_A ) return test_case def UpperCamelCase_ ( lowerCAmelCase__ : str ) -> List[Any]: """simple docstring""" if not config.PIL_AVAILABLE: lowerCAmelCase_ : str = unittest.skip('test requires Pillow' )(_A ) return test_case def UpperCamelCase_ ( lowerCAmelCase__ : List[Any] ) -> List[str]: """simple docstring""" try: import transformers # noqa F401 except ImportError: return unittest.skip('test requires transformers' )(_A ) else: return test_case def UpperCamelCase_ ( lowerCAmelCase__ : Tuple ) -> List[Any]: """simple docstring""" try: import tiktoken # noqa F401 except ImportError: return unittest.skip('test requires tiktoken' )(_A ) else: return test_case def UpperCamelCase_ ( lowerCAmelCase__ : Optional[int] ) -> List[str]: """simple docstring""" try: import spacy # noqa F401 except ImportError: return unittest.skip('test requires spacy' )(_A ) else: return test_case def UpperCamelCase_ ( lowerCAmelCase__ : int ) -> Any: """simple docstring""" def 
_require_spacy_model(lowerCAmelCase__ : Union[str, Any] ): try: import spacy # noqa F401 spacy.load(_A ) except ImportError: return unittest.skip('test requires spacy' )(_A ) except OSError: return unittest.skip('test requires spacy model \'{}\''.format(_A ) )(_A ) else: return test_case return _require_spacy_model def UpperCamelCase_ ( lowerCAmelCase__ : Dict ) -> Any: """simple docstring""" try: import pyspark # noqa F401 except ImportError: return unittest.skip('test requires pyspark' )(_A ) else: return test_case def UpperCamelCase_ ( lowerCAmelCase__ : Union[str, Any] ) -> Any: """simple docstring""" try: import joblibspark # noqa F401 except ImportError: return unittest.skip('test requires joblibspark' )(_A ) else: return test_case def UpperCamelCase_ ( lowerCAmelCase__ : List[Any] ) -> Union[str, Any]: """simple docstring""" if not _run_slow_tests or _run_slow_tests == 0: lowerCAmelCase_ : List[Any] = unittest.skip('test is slow' )(_A ) return test_case def UpperCamelCase_ ( lowerCAmelCase__ : Tuple ) -> Tuple: """simple docstring""" if not _run_local_tests or _run_local_tests == 0: lowerCAmelCase_ : Optional[Any] = unittest.skip('test is local' )(_A ) return test_case def UpperCamelCase_ ( lowerCAmelCase__ : List[str] ) -> List[Any]: """simple docstring""" if not _run_packaged_tests or _run_packaged_tests == 0: lowerCAmelCase_ : str = unittest.skip('test is packaged' )(_A ) return test_case def UpperCamelCase_ ( lowerCAmelCase__ : Tuple ) -> Optional[Any]: """simple docstring""" if not _run_remote_tests or _run_remote_tests == 0: lowerCAmelCase_ : Dict = unittest.skip('test requires remote' )(_A ) return test_case def UpperCamelCase_ ( *lowerCAmelCase__ : str ) -> List[Any]: """simple docstring""" def decorate(cls : Any ): for name, fn in cls.__dict__.items(): if callable(_A ) and name.startswith('test' ): for decorator in decorators: lowerCAmelCase_ : Tuple = decorator(_A ) setattr(cls , _A , _A ) return cls return decorate class UpperCamelCase__ ( 
__UpperCAmelCase ): """simple docstring""" pass class UpperCamelCase__ ( __UpperCAmelCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = 1 _SCREAMING_SNAKE_CASE = 2 @contextmanager def UpperCamelCase_ ( lowerCAmelCase__ : Optional[Any]=OfflineSimulationMode.CONNECTION_FAILS , lowerCAmelCase__ : List[str]=1e-16 ) -> Dict: """simple docstring""" lowerCAmelCase_ : Optional[int] = requests.Session().request def timeout_request(lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : List[str] ): # Change the url to an invalid url so that the connection hangs lowerCAmelCase_ : Optional[int] = 'https://10.255.255.1' if kwargs.get('timeout' ) is None: raise RequestWouldHangIndefinitelyError( f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." ) lowerCAmelCase_ : List[str] = timeout try: return online_request(_A , _A , **_A ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier lowerCAmelCase_ : Tuple = url lowerCAmelCase_ : Union[str, Any] = e.args[0] lowerCAmelCase_ : Optional[int] = (max_retry_error.args[0].replace('10.255.255.1' , f"OfflineMock[{url}]" ),) lowerCAmelCase_ : Dict = (max_retry_error,) raise def raise_connection_error(lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : Optional[Any] ): raise requests.ConnectionError('Offline mode is enabled.' , request=_A ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch('requests.Session.send' , _A ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch('requests.Session.request' , _A ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch('datasets.config.HF_DATASETS_OFFLINE' , _A ): yield else: raise ValueError('Please use a value from the OfflineSimulationMode enum.' 
) @contextmanager def UpperCamelCase_ ( *lowerCAmelCase__ : Union[str, Any] , **lowerCAmelCase__ : Optional[int] ) -> List[str]: """simple docstring""" lowerCAmelCase_ : Optional[Any] = str(Path().resolve() ) with tempfile.TemporaryDirectory(*_A , **_A ) as tmp_dir: try: os.chdir(_A ) yield finally: os.chdir(_A ) @contextmanager def UpperCamelCase_ ( ) -> Tuple: """simple docstring""" import gc gc.collect() lowerCAmelCase_ : Optional[int] = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def UpperCamelCase_ ( ) -> Dict: """simple docstring""" import gc gc.collect() lowerCAmelCase_ : Union[str, Any] = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." def UpperCamelCase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : Dict ) -> Dict: """simple docstring""" return deepcopy(_A ).integers(0 , 100 , 10 ).tolist() == deepcopy(_A ).integers(0 , 100 , 10 ).tolist() def UpperCamelCase_ ( lowerCAmelCase__ : Tuple ) -> Any: """simple docstring""" import decorator from requests.exceptions import HTTPError def _wrapper(lowerCAmelCase__ : Optional[int] , *lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : str ): try: return func(*_A , **_A ) except HTTPError as err: if str(_A ).startswith('500' ) or str(_A ).startswith('502' ): pytest.xfail(str(_A ) ) raise err return decorator.decorator(_wrapper , _A ) class UpperCamelCase__ : """simple docstring""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ): lowerCAmelCase_ : Dict = returncode lowerCAmelCase_ : Tuple = stdout lowerCAmelCase_ : str = stderr async def UpperCamelCase_ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int ) -> Tuple: """simple docstring""" while True: lowerCAmelCase_ : Any = await stream.readline() if line: callback(_A ) 
else: break async def UpperCamelCase_ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : List[str]=False , lowerCAmelCase__ : str=False ) -> str: """simple docstring""" if echo: print('\nRunning: ' , ' '.join(_A ) ) lowerCAmelCase_ : Dict = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_A , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_A , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) lowerCAmelCase_ : Optional[Any] = [] lowerCAmelCase_ : Optional[Any] = [] def tee(lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str]="" ): lowerCAmelCase_ : Union[str, Any] = line.decode('utf-8' ).rstrip() sink.append(_A ) if not quiet: print(_A , _A , file=_A ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda lowerCAmelCase__ : tee(_A , _A , sys.stdout , label='stdout:' ) ), _read_stream(p.stderr , lambda lowerCAmelCase__ : tee(_A , _A , sys.stderr , label='stderr:' ) ), ] , timeout=_A , ) return _RunOutput(await p.wait() , _A , _A ) def UpperCamelCase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : List[str]=180 , lowerCAmelCase__ : Tuple=False , lowerCAmelCase__ : Optional[int]=True ) -> List[str]: """simple docstring""" lowerCAmelCase_ : List[str] = asyncio.get_event_loop() lowerCAmelCase_ : List[str] = 
loop.run_until_complete( _stream_subprocess(_A , env=_A , stdin=_A , timeout=_A , quiet=_A , echo=_A ) ) lowerCAmelCase_ : Tuple = ' '.join(_A ) if result.returncode > 0: lowerCAmelCase_ : Any = '\n'.join(result.stderr ) raise RuntimeError( f"'{cmd_str}' failed with returncode {result.returncode}\n\n" f"The combined stderr from workers follows:\n{stderr}" ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f"'{cmd_str}' produced no output." ) return result def UpperCamelCase_ ( ) -> List[Any]: """simple docstring""" lowerCAmelCase_ : int = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' ) lowerCAmelCase_ : str = re.sub(R'^gw' , '' , _A , 0 , re.M ) return int(_A ) def UpperCamelCase_ ( ) -> int: """simple docstring""" lowerCAmelCase_ : Union[str, Any] = 2_9500 lowerCAmelCase_ : List[str] = pytest_xdist_worker_id() return port + uniq_delta
224
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """Return ``num!`` (the factorial of a non-negative integer).

    Results are memoised by ``lru_cache``, so repeated calls — including the
    intermediate recursive ones — are O(1) after the first computation.

    Raises:
        ValueError: if ``num`` is negative.
    """
    if num < 0:
        raise ValueError("Number should not be negative.")

    # 0! == 1! == 1; otherwise recurse downwards.
    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
278
0
"""Dummy placeholder objects used when the `speech` backend is unavailable."""
from ..utils import DummyObject, requires_backends


# NOTE(review): both classes below carry the same (obfuscated) name, so the
# second definition shadows the first at module level; the intended distinct
# class names could not be recovered from this file alone — confirm.
class lowerCAmelCase_(metaclass=DummyObject):
    """Placeholder that raises an informative error when instantiated without
    the `speech` backend installed."""

    # Backends this dummy stands in for (consumed by the DummyObject metaclass).
    UpperCamelCase_ = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class lowerCAmelCase_(metaclass=DummyObject):
    """Placeholder that raises an informative error when instantiated without
    the `speech` backend installed."""

    # Backends this dummy stands in for (consumed by the DummyObject metaclass).
    UpperCamelCase_ = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
319
# Conversion script: mmsegmentation UperNet + Swin checkpoints -> HF format.
#
# NOTE(review): this chunk was mangled by an automated renaming pass — nearly
# every local is assigned to the placeholder `lowerCAmelCase_` but read back
# through its original name (`model_name`, `config`, `state_dict`, ...), and
# most call arguments were replaced with the placeholder `_A`.  Function names
# were also collapsed onto `__UpperCamelCase`, while call sites still use the
# original names (`get_upernet_config`, `create_rename_keys`, ...).  The code
# is left byte-identical; comments describe the evident intent.
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation


def __UpperCamelCase ( _A ):
    """Build a UperNetConfig with a Swin backbone sized from the model name
    (tiny/small/base/large) and ADE20k label maps (150 classes)."""
    lowerCAmelCase_ = 384
    lowerCAmelCase_ = 7
    if "tiny" in model_name:
        lowerCAmelCase_ = 96
        lowerCAmelCase_ = (2, 2, 6, 2)
        lowerCAmelCase_ = (3, 6, 12, 24)
    elif "small" in model_name:
        lowerCAmelCase_ = 96
        lowerCAmelCase_ = (2, 2, 18, 2)
        lowerCAmelCase_ = (3, 6, 12, 24)
    elif "base" in model_name:
        lowerCAmelCase_ = 128
        lowerCAmelCase_ = (2, 2, 18, 2)
        lowerCAmelCase_ = (4, 8, 16, 32)
        lowerCAmelCase_ = 12
        lowerCAmelCase_ = 512
    elif "large" in model_name:
        lowerCAmelCase_ = 192
        lowerCAmelCase_ = (2, 2, 18, 2)
        lowerCAmelCase_ = (6, 12, 24, 48)
        lowerCAmelCase_ = 12
        lowerCAmelCase_ = 768

    # set label information
    lowerCAmelCase_ = 150
    lowerCAmelCase_ = '''huggingface/label-files'''
    lowerCAmelCase_ = '''ade20k-id2label.json'''
    lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) )
    lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()}
    lowerCAmelCase_ = {v: k for k, v in idalabel.items()}

    lowerCAmelCase_ = SwinConfig(
        embed_dim=_A , depths=_A , num_heads=_A , window_size=_A , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
    lowerCAmelCase_ = UperNetConfig(
        backbone_config=_A , auxiliary_in_channels=_A , num_labels=_A , idalabel=_A , labelaid=_A , )

    return config


def __UpperCamelCase ( _A ):
    """Return the (old_name, new_name) table mapping mmseg parameter names to
    their HF UperNet/Swin equivalents."""
    lowerCAmelCase_ = []

    # fmt: off
    # stem
    rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
    rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
    rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
    rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
        # only the first three stages have a downsample layer
        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight") )
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight") )
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias") )
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") )
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") )
    # decode head
    rename_keys.extend(
        [
            ('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
            ('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
            ('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
            ('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
        ]
    )
    # fmt: on

    return rename_keys


def __UpperCamelCase ( _A , _A , _A ):
    """Move `dct[old]` to `dct[new]` (pop-and-reinsert under the new name)."""
    lowerCAmelCase_ = dct.pop(_A )
    lowerCAmelCase_ = val


def __UpperCamelCase ( _A , _A ):
    """Split each fused qkv projection matrix/bias in `state_dict` into
    separate query, key and value entries expected by the HF model."""
    lowerCAmelCase_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        lowerCAmelCase_ = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            lowerCAmelCase_ = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight" )
            lowerCAmelCase_ = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias" )
            # next, add query, keys and values (in that order) to the state dict
            lowerCAmelCase_ = in_proj_weight[:dim, :]
            lowerCAmelCase_ = in_proj_bias[: dim]
            lowerCAmelCase_ = in_proj_weight[
                dim : dim * 2, :
            ]
            lowerCAmelCase_ = in_proj_bias[
                dim : dim * 2
            ]
            lowerCAmelCase_ = in_proj_weight[
                -dim :, :
            ]
            lowerCAmelCase_ = in_proj_bias[-dim :]
            # fmt: on


def __UpperCamelCase ( _A ):
    """Reorder 2D unfolded (patch-merging) weight columns: window order fix."""
    lowerCAmelCase_ , lowerCAmelCase_ = x.shape
    lowerCAmelCase_ = x.reshape(_A , 4 , in_channel // 4 )
    lowerCAmelCase_ = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(_A , _A )
    return x


def __UpperCamelCase ( _A ):
    """Inverse of the 2D unfold-reorder above."""
    lowerCAmelCase_ , lowerCAmelCase_ = x.shape
    lowerCAmelCase_ = x.reshape(_A , in_channel // 4 , 4 )
    lowerCAmelCase_ = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(_A , _A )
    return x


def __UpperCamelCase ( _A ):
    """Reorder a 1D unfolded tensor (e.g. norm weight) the same way."""
    lowerCAmelCase_ = x.shape[0]
    lowerCAmelCase_ = x.reshape(4 , in_channel // 4 )
    lowerCAmelCase_ = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(_A )
    return x


def __UpperCamelCase ( _A ):
    """Inverse of the 1D unfold-reorder above."""
    lowerCAmelCase_ = x.shape[0]
    lowerCAmelCase_ = x.reshape(in_channel // 4 , 4 )
    lowerCAmelCase_ = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(_A )
    return x


def __UpperCamelCase ( _A , _A , _A ):
    """Download an mmseg UperNet+Swin checkpoint, convert its state dict,
    verify logits on a fixture image, and optionally save / push to hub."""
    lowerCAmelCase_ = {
        '''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''',
        '''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''',
        '''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''',
        '''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''',
    }
    lowerCAmelCase_ = model_name_to_url[model_name]
    lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' , file_name=_A )[
        '''state_dict'''
    ]

    for name, param in state_dict.items():
        print(_A , param.shape )

    lowerCAmelCase_ = get_upernet_config(_A )
    lowerCAmelCase_ = UperNetForSemanticSegmentation(_A )
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        lowerCAmelCase_ = state_dict.pop(_A )
        if "bn" in key:
            lowerCAmelCase_ = key.replace('''bn''' , '''batch_norm''' )
        lowerCAmelCase_ = val

    # rename keys
    lowerCAmelCase_ = create_rename_keys(_A )
    for src, dest in rename_keys:
        rename_key(_A , _A , _A )
    read_in_q_k_v(_A , config.backbone_config )

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                lowerCAmelCase_ = reverse_correct_unfold_reduction_order(_A )
            if "norm" in key:
                lowerCAmelCase_ = reverse_correct_unfold_norm_order(_A )

    model.load_state_dict(_A )

    # verify on image
    lowerCAmelCase_ = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
    lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw ).convert('''RGB''' )
    lowerCAmelCase_ = SegformerImageProcessor()
    lowerCAmelCase_ = processor(_A , return_tensors='''pt''' ).pixel_values

    with torch.no_grad():
        lowerCAmelCase_ = model(_A )
        lowerCAmelCase_ = outputs.logits

    print(logits.shape )
    print('''First values of logits:''' , logits[0, 0, :3, :3] )
    # assert values
    if model_name == "upernet-swin-tiny":
        lowerCAmelCase_ = torch.tensor(
            [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] )
    elif model_name == "upernet-swin-small":
        lowerCAmelCase_ = torch.tensor(
            [[-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.0_9_0_8, -7.0_9_0_8, -6.8_5_3_4]] )
    elif model_name == "upernet-swin-base":
        lowerCAmelCase_ = torch.tensor(
            [[-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.4_7_6_3, -6.4_7_6_3, -6.3_2_5_4]] )
    elif model_name == "upernet-swin-large":
        lowerCAmelCase_ = torch.tensor(
            [[-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.4_0_4_4, -7.4_0_4_4, -7.2_5_8_6]] )
    print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1E-4 )
    print('''Looks ok!''' )

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(_A )
        print(f"Saving processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(_A )

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub" )
        model.push_to_hub(f"openmmlab/{model_name}" )
        processor.push_to_hub(f"openmmlab/{model_name}" )


if __name__ == "__main__":
    _A = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''upernet-swin-tiny''',
        type=str,
        choices=[f"upernet-swin-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''']],
        help='''Name of the Swin + UperNet model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )

    _A = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
278
0
"""simple docstring""" __A = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" __A = [{"type": "code", "content": INSTALL_CONTENT}] __A = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
177
# ASR evaluation script: run a speech-recognition pipeline over a dataset and
# report WER/CER.
#
# NOTE(review): mangled by an automated renaming pass — locals are assigned to
# the placeholder `lowerCAmelCase_` and read back via their original names
# (`args`, `result`, `dataset`, ...), call arguments were replaced by `_A`,
# and the function names were collapsed onto `__UpperCamelCase` while call
# sites still say `log_results` / `normalize_text` / `main`.  Code is left
# byte-identical; comments describe the evident intent.
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def __UpperCamelCase ( _A , _A ):
    """Compute WER and CER over `result` and write them (and optionally all
    predictions/targets) to text files named after the dataset."""
    lowerCAmelCase_ = args.log_outputs
    lowerCAmelCase_ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )

    # load metric
    lowerCAmelCase_ = load_metric('''wer''' )
    lowerCAmelCase_ = load_metric('''cer''' )

    # compute metrics
    lowerCAmelCase_ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    lowerCAmelCase_ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )

    # print & log results
    lowerCAmelCase_ = f"WER: {wer_result}\nCER: {cer_result}"
    print(_A )
    with open(f"{dataset_id}_eval_results.txt" , '''w''' ) as f:
        f.write(_A )

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        lowerCAmelCase_ = f"log_{dataset_id}_predictions.txt"
        lowerCAmelCase_ = f"log_{dataset_id}_targets.txt"
        with open(_A , '''w''' ) as p, open(_A , '''w''' ) as t:
            # mapping function to write output
            def write_to_file(_A , _A ):
                p.write(f"{i}" + '''\n''' )
                p.write(batch['''prediction'''] + '''\n''' )
                t.write(f"{i}" + '''\n''' )
                t.write(batch['''target'''] + '''\n''' )

            result.map(_A , with_indices=_A )


def __UpperCamelCase ( _A ):
    """Lower-case the text and strip punctuation/whitespace runs that the
    training data ignored."""
    lowerCAmelCase_ = '''[,?.!\-\;\:"“%‘”�—’…–]'''  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    lowerCAmelCase_ = re.sub(_A , '''''' , text.lower() )

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    lowerCAmelCase_ = ['''\n\n''', '''\n''', ''' ''', ''' ''']
    for t in token_sequences_to_ignore:
        lowerCAmelCase_ = ''' '''.join(text.split(_A ) )

    return text


def __UpperCamelCase ( _A ):
    """Load the dataset, run the ASR pipeline over every example, then score
    predictions against normalized targets via `log_results`."""
    # load dataset
    lowerCAmelCase_ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_A )

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained(args.model_id )
    lowerCAmelCase_ = feature_extractor.sampling_rate

    # resample audio
    lowerCAmelCase_ = dataset.cast_column('''audio''' , Audio(sampling_rate=_A ) )

    # load eval pipeline
    if args.device is None:
        lowerCAmelCase_ = 0 if torch.cuda.is_available() else -1
    lowerCAmelCase_ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )

    # map function to decode audio
    def map_to_pred(_A ):
        lowerCAmelCase_ = asr(
            batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )

        lowerCAmelCase_ = prediction['''text''']
        lowerCAmelCase_ = normalize_text(batch['''sentence'''] )

        return batch

    # run inference on all examples
    lowerCAmelCase_ = dataset.map(_A , remove_columns=dataset.column_names )

    # compute and log_results
    # do not change function below
    log_results(_A , _A )


if __name__ == "__main__":
    _A = argparse.ArgumentParser()

    parser.add_argument(
        '''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers''' )
    parser.add_argument(
        '''--dataset''',
        type=str,
        required=True,
        help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
    )
    parser.add_argument(
        '''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'`  for Common Voice''' )
    parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
    parser.add_argument(
        '''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.''' )
    parser.add_argument(
        '''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.''' )
    parser.add_argument(
        '''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.''' )
    parser.add_argument(
        '''--device''',
        type=int,
        default=None,
        help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
    )
    _A = parser.parse_args()

    main(args)
278
0
import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __lowerCamelCase : Dict = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class __snake_case ( __UpperCAmelCase , unittest.TestCase ): lowerCAmelCase_ = XLNetTokenizer lowerCAmelCase_ = XLNetTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = True def __a ( self : List[str] ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE__ = XLNetTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def __a ( self : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = """<s>""" SCREAMING_SNAKE_CASE__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ ) def __a ( self : int ): """simple docstring""" SCREAMING_SNAKE_CASE__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """<eod>""" ) self.assertEqual(len(UpperCamelCase__ ) , 10_06 ) def __a ( self : Tuple ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_00 ) def __a ( self : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE__ = XLNetTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(UpperCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [2_85, 46, 10, 1_70, 3_82] ) 
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCamelCase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(UpperCamelCase__ ) self.assertListEqual( UpperCamelCase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def __a ( self : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = XLNetTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCamelCase__ , [ SPIECE_UNDERLINE + """""", """i""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] , ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] ) def __a ( self : 
Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE__ = XLNetTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCamelCase__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] , ) @slow def __a ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE__ = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" ) SCREAMING_SNAKE_CASE__ = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def __a ( self : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE__ = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 
1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase__ , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
219
# CANINE-style character-level tokenizer: every Unicode codepoint is its own
# token id, with a handful of Private-Use codepoints reserved as special
# tokens.
#
# NOTE(review): mangled by an automated renaming pass — all module constants
# were renamed to `_A` (so later assignments shadow earlier ones and the
# SPECIAL_CODEPOINTS dict keys `CLS`/`SEP`/... are undefined here), every
# method parameter was renamed to `UpperCamelCase__`, and the `__init__`
# defaults were rewritten to the self-referential `chr(UpperCamelCase__)`
# (originally chr(CLS), chr(SEP), ... — TODO confirm upstream).  Code is left
# byte-identical; comments describe the evident intent.
from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


_A = logging.get_logger(__name__)

# Per-checkpoint model max length.
_A = {
    '''nielsr/canine-s''': 2_048,
}

# Unicode defines 1,114,112 total “codepoints”
_A = 1_114_112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
_A = 0
_A = 0xe0_00
_A = 0xe0_01
_A = 0xe0_02
_A = 0xe0_03
_A = 0xe0_04

# Maps special codepoints to human-readable names.
_A = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
_A = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class A ( __UpperCAmelCase ):
    """Character-level tokenizer: token ids are Unicode codepoints (ord/chr),
    so no vocab file is needed and `save_vocabulary` returns nothing."""
    __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self, UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=chr(UpperCamelCase__ ), UpperCamelCase__=False, UpperCamelCase__=2048, **UpperCamelCase__, ):
        """Wrap each special token string in AddedToken (no strip on either
        side) and register the special-codepoint lookup tables."""
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else bos_token
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else eos_token
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else sep_token
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else cls_token
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        lowerCAmelCase_ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else mask_token

        super().__init__(
            bos_token=UpperCamelCase__,
            eos_token=UpperCamelCase__,
            sep_token=UpperCamelCase__,
            cls_token=UpperCamelCase__,
            pad_token=UpperCamelCase__,
            mask_token=UpperCamelCase__,
            add_prefix_space=UpperCamelCase__,
            model_max_length=UpperCamelCase__,
            **UpperCamelCase__,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        lowerCAmelCase_ = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            lowerCAmelCase_ = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        lowerCAmelCase_ = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        lowerCAmelCase_ = UNICODE_VOCAB_SIZE
        lowerCAmelCase_ = len(self._special_codepoints )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Vocab size is the full Unicode codepoint space."""
        return self._unicode_vocab_size

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
        """Tokenize by splitting the string into individual characters."""
        return list(UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
        """A token's id is simply its codepoint (`ord`)."""
        try:
            return ord(UpperCamelCase__ )
        except TypeError:
            raise ValueError(f"invalid token: '{token}'" )

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
        """Map an id back to its character, with special ids rendered as their
        human-readable names (e.g. "[CLS]")."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(UpperCamelCase__ )
        except TypeError:
            raise ValueError(f"invalid id: {index}" )

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
        """Detokenize by concatenating the characters."""
        return "".join(UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ):
        """Build model inputs: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        lowerCAmelCase_ = [self.sep_token_id]
        lowerCAmelCase_ = [self.cls_token_id]

        lowerCAmelCase_ = cls + token_ids_a + sep
        if token_ids_a is not None:
            result += token_ids_a + sep
        return result

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None, UpperCamelCase__ = False ):
        """Mark special-token positions with 1, sequence tokens with 0."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCamelCase__, token_ids_a=UpperCamelCase__, already_has_special_tokens=UpperCamelCase__ )

        lowerCAmelCase_ = [1] + ([0] * len(UpperCamelCase__ )) + [1]
        if token_ids_a is not None:
            result += ([0] * len(UpperCamelCase__ )) + [1]
        return result

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ):
        """Segment ids: 0 for the first sequence (incl. CLS/SEP), 1 for the
        second sequence and its trailing SEP."""
        lowerCAmelCase_ = [self.sep_token_id]
        lowerCAmelCase_ = [self.cls_token_id]

        lowerCAmelCase_ = len(cls + token_ids_a + sep ) * [0]
        if token_ids_a is not None:
            result += len(token_ids_a + sep ) * [1]
        return result

    def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = None ):
        """No vocab file to save for a codepoint tokenizer."""
        return ()
278
0
# NOTE(review): identifiers in this module are machine-mangled.  `__snake_case`
# is re-bound for every module constant (only the final value, 4, survives), the
# class inherits from `__UpperCAmelCase` which is never defined here, the class
# attributes reference undefined `VOCAB_FILES_NAMES` / `PRETRAINED_*` names, and
# almost every method is named `__UpperCAmelCase`, so later defs overwrite
# earlier ones.  Bodies also read names (`mask_token`, `sp_model_kwargs`,
# `inputs`, …) that their own assignments never bind.  Code is left
# byte-identical; only comments are added.
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


# Each assignment below overwrites the previous one (mangling artifact).
__snake_case = logging.get_logger(__name__)
__snake_case = {"""vocab_file""": """spiece.model"""}
__snake_case = {
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    }
}
__snake_case = {
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}

# Segments (not really needed)
__snake_case = 0
__snake_case = 1
__snake_case = 2
__snake_case = 3
__snake_case = 4


class UpperCAmelCase_(__UpperCAmelCase):
    # NOTE(review): base `__UpperCAmelCase` and the RHS names below are undefined
    # in this module — presumably PreTrainedTokenizer and the vocab constants.
    lowerCamelCase: List[Any] = VOCAB_FILES_NAMES
    lowerCamelCase: List[Any] = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase: Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase: Optional[Any] = '''left'''

    def __init__(self: List[str], UpperCAmelCase__: Optional[Any], UpperCAmelCase__: List[Any] = False, UpperCAmelCase__: str = True, UpperCAmelCase__: Optional[int] = False, UpperCAmelCase__: List[Any] = "<s>", UpperCAmelCase__: Any = "</s>", UpperCAmelCase__: Tuple = "<unk>", UpperCAmelCase__: int = "<sep>", UpperCAmelCase__: Optional[Any] = "<pad>", UpperCAmelCase__: Dict = "<cls>", UpperCAmelCase__: Dict = "<mask>", UpperCAmelCase__: int = ["<eop>", "<eod>"], UpperCAmelCase__: str = None, **UpperCAmelCase__: Optional[int], ) -> Union[str, Any]:
        # Wrap the mask token so surrounding whitespace is kept when decoding.
        lowerCAmelCase = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__, UpperCamelCase__) else mask_token
        lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(do_lower_case=UpperCamelCase__, remove_space=UpperCamelCase__, keep_accents=UpperCamelCase__, bos_token=UpperCamelCase__, eos_token=UpperCamelCase__, unk_token=UpperCamelCase__, sep_token=UpperCamelCase__, pad_token=UpperCamelCase__, cls_token=UpperCamelCase__, mask_token=UpperCamelCase__, additional_special_tokens=UpperCamelCase__, sp_model_kwargs=self.sp_model_kwargs, **UpperCamelCase__, )
        lowerCAmelCase = 3
        lowerCAmelCase = do_lower_case
        lowerCAmelCase = remove_space
        lowerCAmelCase = keep_accents
        lowerCAmelCase = vocab_file
        # Load the SentencePiece model that backs all tokenize/detokenize calls.
        lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(UpperCamelCase__)

    # NOTE(review): every `__UpperCAmelCase` def below shadows the previous one.
    @property
    def __UpperCAmelCase(self: int) -> Tuple:
        # vocab size == size of the SentencePiece model
        return len(self.sp_model)

    def __UpperCAmelCase(self: Tuple) -> Tuple:
        # token -> id map, including tokens added after training
        lowerCAmelCase = {self.convert_ids_to_tokens(UpperCamelCase__): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self: Tuple) -> Optional[int]:
        # The SentencePiece processor is not picklable; drop it and reload on setstate.
        lowerCAmelCase = self.__dict__.copy()
        lowerCAmelCase = None
        return state

    def __setstate__(self: str, UpperCAmelCase__: Union[str, Any]) -> List[Any]:
        lowerCAmelCase = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            lowerCAmelCase = {}

        lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def __UpperCAmelCase(self: List[Any], UpperCAmelCase__: Tuple) -> Optional[Any]:
        # Normalize raw text before SentencePiece sees it: collapse whitespace,
        # unify quote styles, optionally strip accents and lower-case.
        if self.remove_space:
            lowerCAmelCase = ' '.join(inputs.strip().split())
        else:
            lowerCAmelCase = inputs
        lowerCAmelCase = outputs.replace('``', '"').replace('\'\'', '"')

        if not self.keep_accents:
            lowerCAmelCase = unicodedata.normalize('NFKD', UpperCamelCase__)
            lowerCAmelCase = ''.join([c for c in outputs if not unicodedata.combining(UpperCamelCase__)])
        if self.do_lower_case:
            lowerCAmelCase = outputs.lower()

        return outputs

    def __UpperCAmelCase(self: Optional[int], UpperCAmelCase__: Dict) -> Dict:
        # Tokenize, then split pieces that end in "<digit>," so trailing commas
        # after numbers become their own pieces.
        lowerCAmelCase = self.preprocess_text(UpperCamelCase__)
        lowerCAmelCase = self.sp_model.encode(UpperCamelCase__, out_type=UpperCamelCase__)
        lowerCAmelCase = []
        for piece in pieces:
            if len(UpperCamelCase__) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
                lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__, ''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        lowerCAmelCase = cur_pieces[1:]
                    else:
                        lowerCAmelCase = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(UpperCamelCase__)
            else:
                new_pieces.append(UpperCamelCase__)
        return new_pieces

    def __UpperCAmelCase(self: Any, UpperCAmelCase__: Union[str, Any]) -> Any:
        # token (str) -> id via the SentencePiece model
        return self.sp_model.PieceToId(UpperCamelCase__)

    def __UpperCAmelCase(self: Optional[int], UpperCAmelCase__: int) -> Any:
        # id -> token (str) via the SentencePiece model
        return self.sp_model.IdToPiece(UpperCamelCase__)

    def __UpperCAmelCase(self: Optional[int], UpperCAmelCase__: str) -> int:
        # NOTE(review): `.replace(UpperCamelCase__, ' ')` passes the token *list*
        # where upstream passed SPIECE_UNDERLINE — this raises TypeError as written.
        lowerCAmelCase = ''.join(UpperCamelCase__).replace(UpperCamelCase__, ' ').strip()
        return out_string

    def __UpperCAmelCase(self: List[Any], UpperCAmelCase__: Optional[Any], UpperCAmelCase__: Optional[int] = False, UpperCAmelCase__: List[Any] = None, UpperCAmelCase__: int = True, **UpperCAmelCase__: Union[str, Any], ) -> Optional[int]:
        lowerCAmelCase = kwargs.pop('use_source_tokenizer', UpperCamelCase__)
        lowerCAmelCase = self.convert_ids_to_tokens(UpperCamelCase__, skip_special_tokens=UpperCamelCase__)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        lowerCAmelCase = []
        lowerCAmelCase = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__))
                    lowerCAmelCase = []
                sub_texts.append(UpperCamelCase__)
            else:
                current_sub_text.append(UpperCamelCase__)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        lowerCAmelCase = ''.join(UpperCamelCase__)

        lowerCAmelCase = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            lowerCAmelCase = self.clean_up_tokenization(UpperCamelCase__)
            return clean_text
        else:
            return text

    def __UpperCAmelCase(self: Optional[Any], UpperCAmelCase__: Tuple, UpperCAmelCase__: List[str] = None) -> str:
        # XLNet layout: tokens_a <sep> [tokens_b <sep>] <cls>
        lowerCAmelCase = [self.sep_token_id]
        lowerCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def __UpperCAmelCase(self: List[str], UpperCAmelCase__: str, UpperCAmelCase__: Union[str, Any] = None, UpperCAmelCase__: int = False) -> str:
        # 1 marks special tokens (<sep>/<cls>), 0 marks sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_a=UpperCamelCase__, token_ids_a=UpperCamelCase__, already_has_special_tokens=UpperCamelCase__)

        if token_ids_a is not None:
            return ([0] * len(UpperCamelCase__)) + [1] + ([0] * len(UpperCamelCase__)) + [1, 1]
        return ([0] * len(UpperCamelCase__)) + [1, 1]

    def __UpperCAmelCase(self: Optional[Any], UpperCAmelCase__: List[Any], UpperCAmelCase__: List[Any] = None) -> Union[str, Any]:
        # Segment ids: first sequence 0, second sequence 1, <cls> gets segment 2.
        lowerCAmelCase = [self.sep_token_id]
        lowerCAmelCase = [2]

        if token_ids_a is None:
            return len(token_ids_a + sep) * [0] + cls_segment_id
        return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id

    def __UpperCAmelCase(self: List[Any], UpperCAmelCase__: List[Any], UpperCAmelCase__: str = None) -> Any:
        # Copy (or serialize) the SentencePiece model file into save_directory.
        if not os.path.isdir(UpperCamelCase__):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        lowerCAmelCase = os.path.join(UpperCamelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase__) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, UpperCamelCase__)
        elif not os.path.isfile(self.vocab_file):
            with open(UpperCamelCase__, 'wb') as fi:
                lowerCAmelCase = self.sp_model.serialized_model_proto()
                fi.write(UpperCamelCase__)

        return (out_vocab_file,)
4
def __UpperCamelCase(_A=1000000):
    """Return the start value in ``[1, _A)`` whose Collatz sequence is longest.

    Project Euler #14.  Chain lengths are memoized in ``counters`` so each
    number's tail is walked only once.

    Fixes vs. the mangled original: every local was assigned to the same name
    ``lowerCAmelCase_`` while the logic read ``counters`` / ``largest_number`` /
    ``pre_counter`` (NameError), and the ``__main__`` block called a never-defined
    ``solution``.

    Args:
        _A: exclusive upper bound for starting numbers (default 1,000,000).

    Returns:
        The starting number with the longest Collatz chain.
    """
    largest_number = 1
    pre_counter = 1          # longest chain length seen so far
    counters = {1: 1}        # memo: start value -> chain length
    for inputa in range(2, _A):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                # Rest of the chain is already known — reuse it.
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number


# Backward-compatible alias: the __main__ block (and the doctest-style usage)
# refers to `solution`, which was never defined in the mangled original.
solution = __UpperCamelCase

if __name__ == "__main__":
    print(solution(int(input().strip())))
278
0
"""simple docstring"""
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    '''split_dict''',
    [
        SplitDict(),
        SplitDict({'''train''': SplitInfo(name='''train''', num_bytes=1_3_3_7, num_examples=4_2, dataset_name='''my_dataset''')}),
        SplitDict({'''train''': SplitInfo(name='''train''', num_bytes=1_3_3_7, num_examples=4_2)}),
        SplitDict({'''train''': SplitInfo()}),
    ],
)
def lowercase(split_dict: SplitDict):
    """Round-trip a SplitDict through its YAML-list representation.

    Fixes vs. the mangled original: the parameter must be named ``split_dict``
    to match the ``parametrize`` argnames above, the body referenced the
    undefined name ``_A``, ``len(_A) == len(_A)`` compared a value to itself,
    and the loop's attribute assignments never actually touched ``split_info``.
    """
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    '''split_info''', [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name='''my_dataset''')]
)
def lowercase_asdict_has_dataset_name(split_info):
    """``asdict`` on a SplitDict must keep the deprecated ``dataset_name`` field.

    For backward compatibility, asdict(split_dict) must return split info
    dictionaries with the "dataset_name" field even though it is deprecated,
    so old versions of `datasets` can still reload dataset_infos.json files.

    NOTE: renamed from a second ``lowercase`` def — the mangled original gave
    both tests the same name, so the second silently shadowed the first and
    pytest never collected it.
    """
    split_dict_asdict = asdict(SplitDict({'''train''': split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
33
# NOTE(review): mangled integration test for DiT document-image classification.
# Several names the body reads are never bound (`model`, `image_processor`,
# `dataset`, `inputs`, `outputs`, `logits`, and the device `UpperCamelCase__`)
# because every assignment targets the same throwaway name `lowerCAmelCase_`.
# Code left byte-identical; comments only.
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class A(unittest.TestCase):
    @slow
    def SCREAMING_SNAKE_CASE__(self):
        """Run microsoft/dit-base-finetuned-rvlcdip on one RVL-CDIP demo image
        and check the logits shape (1, 16 classes) and first three values."""
        lowerCAmelCase_ = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''')
        lowerCAmelCase_ = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''')
        # NOTE(review): `UpperCamelCase__` is undefined here — presumably torch_device.
        model.to(UpperCamelCase__)

        from datasets import load_dataset

        lowerCAmelCase_ = load_dataset('''nielsr/rvlcdip-demo''')

        lowerCAmelCase_ = dataset['''train'''][0]['''image'''].convert('''RGB''')

        lowerCAmelCase_ = image_processor(UpperCamelCase__, return_tensors='''pt''').to(UpperCamelCase__)

        # forward pass
        with torch.no_grad():
            lowerCAmelCase_ = model(**UpperCamelCase__)

        lowerCAmelCase_ = outputs.logits

        lowerCAmelCase_ = torch.Size((1, 16))
        self.assertEqual(logits.shape, UpperCamelCase__)

        lowerCAmelCase_ = torch.tensor(
            [-0.4_158, -0.4_092, -0.4_347],
            device=UpperCamelCase__,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], UpperCamelCase__, atol=1E-4))
278
0
# Deprecation shim: importing TFGenerationMixin from this legacy module path
# emits a warning once, at import time of this module.
import warnings

from .generation import TFGenerationMixin


# NOTE(review): the base class `__UpperCAmelCase` is undefined here (and the
# same undefined name is passed as the warning category) — upstream these were
# TFGenerationMixin and FutureWarning respectively.  Code left byte-identical.
class A_(__UpperCAmelCase):
    # warning at import time
    warnings.warn(
        '''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
        '''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''',
        __UpperCAmelCase,
    )
73
# NOTE(review): mangled Pix2Struct T5X -> HF conversion script.  All four
# functions share the name `__UpperCamelCase` (each def overwrites the last),
# locals are all assigned to `lowerCAmelCase_` while the logic reads real names
# (`flax_params`, `new_key`, `converted_dict`, …), the __main__ block calls a
# never-defined `convert_pixastruct_original_pytorch_checkpoint_to_hf`, and it
# reads `args.tax_checkpoint_path` although the flag is `--t5x_checkpoint_path`.
# Code left byte-identical; comments only.
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints

from transformers import (
    AutoTokenizer,
    PixaStructConfig,
    PixaStructForConditionalGeneration,
    PixaStructImageProcessor,
    PixaStructProcessor,
    PixaStructTextConfig,
    PixaStructVisionConfig,
)


def __UpperCamelCase(_A):
    """Load a T5X checkpoint and return its parameters as a flat dict."""
    lowerCAmelCase_ = checkpoints.load_tax_checkpoint(_A)
    lowerCAmelCase_ = flatten_dict(_A)
    return flax_params


def __UpperCamelCase(_A):
    """Rename flat T5X parameter keys to HF Pix2Struct names and convert to torch."""
    lowerCAmelCase_ = {}

    # generic key renames (applied to every parameter)
    lowerCAmelCase_ = {
        '''token_embedder''': '''embeddings''',
        '''encoder_norm''': '''layernorm''',
        '''kernel''': '''weight''',
        '''.out''': '''.output''',
        '''scale''': '''weight''',
        '''embedders_0.pos_embedding''': '''row_embedder.weight''',
        '''embedders_1.pos_embedding''': '''column_embedder.weight''',
    }

    # decoder-only key renames
    lowerCAmelCase_ = {
        '''query''': '''attention.query''',
        '''key''': '''attention.key''',
        '''value''': '''attention.value''',
        '''output.dense''': '''output''',
        '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
        '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
        '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
        '''mlp.''': '''mlp.DenseReluDense.''',
        '''pre_mlp_layer_norm''': '''mlp.layer_norm''',
        '''self_attention.o''': '''self_attention.attention.o''',
        '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
        '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
        '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
        '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            lowerCAmelCase_ = '''.'''.join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                lowerCAmelCase_ = new_key.replace(_A, _A)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    lowerCAmelCase_ = new_key.replace(_A, _A)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                lowerCAmelCase_ = re.sub(r'''layers_(\d+)''', r'''layer.\1''', _A)
                lowerCAmelCase_ = new_key.replace('''encoder''', '''encoder.encoder''')
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                lowerCAmelCase_ = re.sub(r'''layers_(\d+)''', r'''layer.\1''', _A)
            lowerCAmelCase_ = flax_dict[key]

    lowerCAmelCase_ = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            # dense kernels are stored transposed relative to torch Linear
            lowerCAmelCase_ = torch.from_numpy(converted_dict[key].T)
        else:
            lowerCAmelCase_ = torch.from_numpy(converted_dict[key])

    return converted_torch_dict


def __UpperCamelCase(_A, _A, _A=False, _A=False):
    """Build a Pix2Struct HF model from a T5X checkpoint and save it (+processor).

    NOTE(review): all four positional parameters share the name `_A`, which is a
    SyntaxError as written; upstream they were
    (t5x_checkpoint_path, pytorch_dump_folder_path, use_large, is_vqa).
    """
    lowerCAmelCase_ = get_flax_param(_A)

    if not use_large:
        lowerCAmelCase_ = PixaStructVisionConfig()
        lowerCAmelCase_ = PixaStructTextConfig()
    else:
        lowerCAmelCase_ = PixaStructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18)
        lowerCAmelCase_ = PixaStructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    lowerCAmelCase_ = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=_A)

    lowerCAmelCase_ = PixaStructForConditionalGeneration(_A)

    lowerCAmelCase_ = rename_and_convert_flax_params(_A)
    model.load_state_dict(_A)

    lowerCAmelCase_ = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''')
    lowerCAmelCase_ = PixaStructImageProcessor()
    lowerCAmelCase_ = PixaStructProcessor(image_processor=_A, tokenizer=_A)

    if use_large:
        lowerCAmelCase_ = 4096
        lowerCAmelCase_ = True

    # mkdir if needed
    os.makedirs(_A, exist_ok=_A)

    model.save_pretrained(_A)
    processor.save_pretrained(_A)

    print('''Model saved in {}'''.format(_A))


if __name__ == "__main__":
    _A = argparse.ArgumentParser()
    parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
    parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''')
    _A = parser.parse_args()

    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
278
0
# NOTE(review): mangled PyTorch-Lightning callback helpers for seq2seq RAG
# training.  The three module-level helpers all share the name `snake_case`
# (later defs overwrite earlier), several defs repeat the same parameter name
# (a SyntaxError as written), every local is assigned to `_A` while the logic
# reads real names (`model`, `metric`, `od`, `val`, …), and the `typing` names
# used in annotations are never imported.  Code left byte-identical.
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def snake_case(snake_case__: str) -> int:
    """Count trainable (requires_grad) parameters of a model."""
    _A = filter(lambda snake_case__: p.requires_grad, model.parameters())
    _A = sum([np.prod(p.size()) for p in model_parameters])
    return params


_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)


def snake_case(snake_case__: Optional[int], snake_case__: Tuple) -> Dict:
    """Build a ModelCheckpoint that tracks val_<metric> (rouge2/bleu/em/loss)."""
    if metric == "rouge2":
        _A = """{val_avg_rouge2:.4f}-{step_count}"""
    elif metric == "bleu":
        _A = """{val_avg_bleu:.4f}-{step_count}"""
    elif metric == "em":
        _A = """{val_avg_em:.4f}-{step_count}"""
    elif metric == "loss":
        _A = """{val_avg_loss:.4f}-{step_count}"""
    else:
        raise NotImplementedError(
            F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
            """ function.""")

    _A = ModelCheckpoint(
        dirpath=_A,
        filename=_A,
        monitor=F'''val_{metric}''',
        mode="""max""",
        save_top_k=1,
        every_n_epochs=1,
    )

    return checkpoint_callback


def snake_case(snake_case__: Tuple, snake_case__: int) -> Optional[Any]:
    """Build an EarlyStopping callback on val_<metric> (min for loss, else max)."""
    return EarlyStopping(
        monitor=F'''val_{metric}''',
        mode="""min""" if """loss""" in metric else """max""",
        patience=_A,
        verbose=_A,
    )


class a(pl.Callback):
    """simple docstring"""

    # NOTE(review): all five methods share the name `UpperCAmelCase`; upstream
    # they were on_batch_end / _write_logs / on_train_start /
    # on_test_end / on_validation_end.

    def UpperCAmelCase(self, lowerCAmelCase_, lowerCAmelCase_) -> Optional[Any]:
        # Log the learning rate of every optimizer param group.
        _A = {F'''lr_group_{i}''': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(UpperCamelCase__)

    @rank_zero_only
    def UpperCAmelCase(self, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_=True) -> Tuple:
        # Dump callback metrics (and optionally generations) to text files.
        logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''')
        _A = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]})
        # Log results
        _A = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            _A = od / """test_results.txt"""
            _A = od / """test_generations.txt"""
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            _A = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
            _A = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
        results_file.parent.mkdir(exist_ok=UpperCamelCase__)
        generations_file.parent.mkdir(exist_ok=UpperCamelCase__)
        with open(UpperCamelCase__, """a+""") as writer:
            for key in sorted(UpperCamelCase__):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                _A = metrics[key]
                if isinstance(UpperCamelCase__, torch.Tensor):
                    _A = val.item()
                _A = F'''{key}: {val:.6f}\n'''
                writer.write(UpperCamelCase__)

        if not save_generations:
            return

        if "preds" in metrics:
            _A = """\n""".join(metrics["""preds"""])
            generations_file.open("""w+""").write(UpperCamelCase__)

    @rank_zero_only
    def UpperCAmelCase(self, lowerCAmelCase_, lowerCAmelCase_) -> List[Any]:
        # Log total / trainable parameter counts in millions.
        try:
            _A = pl_module.model.model.num_parameters()
        except AttributeError:
            _A = pl_module.model.num_parameters()

        _A = count_trainable_parameters(UpperCamelCase__)
        # mp stands for million parameters
        trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6})

    @rank_zero_only
    def UpperCAmelCase(self, lowerCAmelCase_, lowerCAmelCase_) -> int:
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(UpperCamelCase__, UpperCamelCase__, """test""")

    @rank_zero_only
    def UpperCAmelCase(self, lowerCAmelCase_, lowerCAmelCase_) -> Dict:
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
180
# Deprecation shim: BeitFeatureExtractor is a thin subclass that warns and
# forwards to the (intended) BeitImageProcessor.
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


_A = logging.get_logger(__name__)


# NOTE(review): mangled — the base class `__UpperCAmelCase` is undefined
# (upstream: BeitImageProcessor), the warning category argument is the
# undefined `UpperCamelCase__` (upstream: FutureWarning), and `__init__`
# uses the same name for its *args and **kwargs, which is a SyntaxError
# as written.  Code left byte-identical.
class A(__UpperCAmelCase):
    def __init__(self, *UpperCamelCase__, **UpperCamelCase__):
        """simple docstring"""
        warnings.warn(
            '''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use BeitImageProcessor instead.''',
            UpperCamelCase__,
        )
        super().__init__(*UpperCamelCase__, **UpperCamelCase__)
278
0
# NOTE(review): mangled TFResNet test module.  Three classes share the name
# `lowercase` (the model tester, the TF model-test suite, and the integration
# suite — each overwrites the previous), the test-suite bases `__UpperCAmelCase`
# are undefined (upstream: TFModelTesterMixin, PipelineTesterMixin), several
# defs repeat parameter names (SyntaxError as written), and locals assigned to
# `lowercase` shadow the class name while bodies read real names.  Code left
# byte-identical; comments only.
from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFResNetForImageClassification, TFResNetModel
    from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class lowercase:
    # Model tester: builds small ResNet configs/inputs for the suite below.
    def __init__(self, A__, A__=3, A__=3_2, A__=3, A__=1_0, A__=[1_0, 2_0, 3_0, 4_0], A__=[1, 1, 2, 1], A__=True, A__=True, A__="relu", A__=3, A__=None, ):
        lowercase = parent
        lowercase = batch_size
        lowercase = image_size
        lowercase = num_channels
        lowercase = embeddings_size
        lowercase = hidden_sizes
        lowercase = depths
        lowercase = is_training
        lowercase = use_labels
        lowercase = hidden_act
        lowercase = num_labels
        lowercase = scope
        lowercase = len(UpperCamelCase__)

    def A__(self):
        # random pixel values (+ labels when use_labels) and a fresh config
        lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        lowercase = None
        if self.use_labels:
            lowercase = ids_tensor([self.batch_size], self.num_labels)

        lowercase = self.get_config()

        return config, pixel_values, labels

    def A__(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def A__(self, A__, A__, A__):
        lowercase = TFResNetModel(config=UpperCamelCase__)
        lowercase = model(UpperCamelCase__)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2),
        )

    def A__(self, A__, A__, A__):
        lowercase = self.num_labels
        lowercase = TFResNetForImageClassification(UpperCamelCase__)
        lowercase = model(UpperCamelCase__, labels=UpperCamelCase__)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def A__(self):
        lowercase = self.prepare_config_and_inputs()
        lowercase, lowercase, lowercase = config_and_inputs
        lowercase = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_tf
class lowercase(__UpperCAmelCase, __UpperCAmelCase, unittest.TestCase):
    # TF model-test suite for ResNet (no attention / no embeddings).
    lowercase_: Optional[Any] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    lowercase_: Optional[int] = (
        {'''feature-extraction''': TFResNetModel, '''image-classification''': TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    lowercase_: Optional[Any] = False
    lowercase_: Dict = False
    lowercase_: Union[str, Any] = False
    lowercase_: Tuple = False
    lowercase_: Tuple = False

    def A__(self):
        lowercase = TFResNetModelTester(self)
        lowercase = ConfigTester(self, config_class=UpperCamelCase__, has_text_modality=UpperCamelCase__)

    def A__(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def A__(self):
        return

    @unittest.skip(reason='''ResNet does not use inputs_embeds''')
    def A__(self):
        pass

    @unittest.skip(reason='''ResNet does not support input and output embeddings''')
    def A__(self):
        pass

    def A__(self):
        # forward signature must start with pixel_values
        lowercase, lowercase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowercase = model_class(UpperCamelCase__)
            lowercase = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase = [*signature.parameters.keys()]

            lowercase = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], UpperCamelCase__)

    def A__(self):
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__)

    def A__(self):
        # hidden states: one per stage (+ input), spatial dims image_size // 4
        def check_hidden_states_output(A__, A__, A__):
            lowercase = model_class(UpperCamelCase__)
            lowercase = model(**self._prepare_for_class(UpperCamelCase__, UpperCamelCase__))

            lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            lowercase = self.model_tester.num_stages
            self.assertEqual(len(UpperCamelCase__), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        lowercase, lowercase = self.model_tester.prepare_config_and_inputs_for_common()

        lowercase = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                lowercase = layer_type
                lowercase = True
                check_hidden_states_output(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                lowercase = True

                check_hidden_states_output(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__)

    def A__(self):
        lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__)

    @slow
    def A__(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase = TFResNetModel.from_pretrained(UpperCamelCase__)
            self.assertIsNotNone(UpperCamelCase__)


def UpperCamelCase():
    '''simple docstring'''
    # Load the standard COCO cats test image used by HF vision tests.
    lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image


@require_tf
@require_vision
class lowercase(unittest.TestCase):
    # Integration test: run the first pretrained checkpoint on one image.
    @cached_property
    def A__(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def A__(self):
        lowercase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        lowercase = self.default_image_processor
        lowercase = prepare_img()
        lowercase = image_processor(images=UpperCamelCase__, return_tensors='''tf''')

        # forward pass
        lowercase = model(**UpperCamelCase__)

        # verify the logits
        lowercase = tf.TensorShape((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape, UpperCamelCase__)

        lowercase = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), UpperCamelCase__, atol=1E-4))
101
# NOTE(review): mangled YOLOS -> HF conversion script.  All five functions
# share the name `__UpperCamelCase` (each def overwrites the last), and the
# __main__ block calls a never-defined `convert_yolos_checkpoint`.  Locals are
# assigned to `lowerCAmelCase_` while bodies read real names.  Code left
# byte-identical; comments only.
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
_A = logging.get_logger(__name__)


def __UpperCamelCase(_A):
    """Build the YolosConfig for a given architecture name (+ COCO id2label)."""
    lowerCAmelCase_ = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        lowerCAmelCase_ = 192
        lowerCAmelCase_ = 768
        lowerCAmelCase_ = 12
        lowerCAmelCase_ = 3
        lowerCAmelCase_ = [800, 1333]
        lowerCAmelCase_ = False
    elif yolos_name == "yolos_s_dWr":
        lowerCAmelCase_ = 330
        lowerCAmelCase_ = 14
        lowerCAmelCase_ = 6
        lowerCAmelCase_ = 1320
    elif "yolos_s" in yolos_name:
        lowerCAmelCase_ = 384
        lowerCAmelCase_ = 1536
        lowerCAmelCase_ = 12
        lowerCAmelCase_ = 6
    elif "yolos_b" in yolos_name:
        lowerCAmelCase_ = [800, 1344]

    lowerCAmelCase_ = 91
    lowerCAmelCase_ = '''huggingface/label-files'''
    lowerCAmelCase_ = '''coco-detection-id2label.json'''
    lowerCAmelCase_ = json.load(open(hf_hub_download(_A, _A, repo_type='''dataset'''), '''r'''))
    lowerCAmelCase_ = {int(_A): v for k, v in idalabel.items()}
    lowerCAmelCase_ = idalabel
    lowerCAmelCase_ = {v: k for k, v in idalabel.items()}

    return config


def __UpperCamelCase(_A, _A, _A=False):
    """Split each timm fused qkv matrix into separate q/k/v entries in-place."""
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        lowerCAmelCase_ = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        lowerCAmelCase_ = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        lowerCAmelCase_ = in_proj_weight[: config.hidden_size, :]
        lowerCAmelCase_ = in_proj_bias[: config.hidden_size]
        lowerCAmelCase_ = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        lowerCAmelCase_ = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        lowerCAmelCase_ = in_proj_weight[-config.hidden_size :, :]
        lowerCAmelCase_ = in_proj_bias[-config.hidden_size :]


def __UpperCamelCase(_A):
    """Map one original parameter name to its HF YOLOS name."""
    if "backbone" in name:
        lowerCAmelCase_ = name.replace('''backbone''', '''vit''')
    if "cls_token" in name:
        lowerCAmelCase_ = name.replace('''cls_token''', '''embeddings.cls_token''')
    if "det_token" in name:
        lowerCAmelCase_ = name.replace('''det_token''', '''embeddings.detection_tokens''')
    if "mid_pos_embed" in name:
        lowerCAmelCase_ = name.replace('''mid_pos_embed''', '''encoder.mid_position_embeddings''')
    if "pos_embed" in name:
        lowerCAmelCase_ = name.replace('''pos_embed''', '''embeddings.position_embeddings''')
    if "patch_embed.proj" in name:
        lowerCAmelCase_ = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''')
    if "blocks" in name:
        lowerCAmelCase_ = name.replace('''blocks''', '''encoder.layer''')
    if "attn.proj" in name:
        lowerCAmelCase_ = name.replace('''attn.proj''', '''attention.output.dense''')
    if "attn" in name:
        lowerCAmelCase_ = name.replace('''attn''', '''attention.self''')
    if "norm1" in name:
        lowerCAmelCase_ = name.replace('''norm1''', '''layernorm_before''')
    if "norm2" in name:
        lowerCAmelCase_ = name.replace('''norm2''', '''layernorm_after''')
    if "mlp.fc1" in name:
        lowerCAmelCase_ = name.replace('''mlp.fc1''', '''intermediate.dense''')
    if "mlp.fc2" in name:
        lowerCAmelCase_ = name.replace('''mlp.fc2''', '''output.dense''')
    if "class_embed" in name:
        lowerCAmelCase_ = name.replace('''class_embed''', '''class_labels_classifier''')
    if "bbox_embed" in name:
        lowerCAmelCase_ = name.replace('''bbox_embed''', '''bbox_predictor''')
    if "vit.norm" in name:
        lowerCAmelCase_ = name.replace('''vit.norm''', '''vit.layernorm''')

    return name


def __UpperCamelCase(_A, _A):
    """Rename every key of the original state dict; split fused qkv weights."""
    for key in orig_state_dict.copy().keys():
        lowerCAmelCase_ = orig_state_dict.pop(_A)

        if "qkv" in key:
            lowerCAmelCase_ = key.split('''.''')
            lowerCAmelCase_ = int(key_split[2])
            lowerCAmelCase_ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                lowerCAmelCase_ = val[:dim, :]
                lowerCAmelCase_ = val[
                    dim : dim * 2, :
                ]
                lowerCAmelCase_ = val[-dim:, :]
            else:
                lowerCAmelCase_ = val[:dim]
                lowerCAmelCase_ = val[dim : dim * 2]
                lowerCAmelCase_ = val[-dim:]
        else:
            lowerCAmelCase_ = val

    return orig_state_dict


def __UpperCamelCase():
    # Standard COCO cats test image fetched over HTTP.
    lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    lowerCAmelCase_ = Image.open(requests.get(_A, stream=_A).raw)
    return im


@torch.no_grad()
def __UpperCamelCase(_A, _A, _A, _A=False):
    """Convert an original YOLOS checkpoint, verify logits/boxes, save (+push)."""
    lowerCAmelCase_ = get_yolos_config(_A)

    # load original state_dict
    lowerCAmelCase_ = torch.load(_A, map_location='''cpu''')['''model''']

    # load 🤗 model
    lowerCAmelCase_ = YolosForObjectDetection(_A)
    model.eval()
    lowerCAmelCase_ = convert_state_dict(_A, _A)
    model.load_state_dict(_A)

    # Check outputs on an image, prepared by YolosImageProcessor
    lowerCAmelCase_ = 800 if yolos_name != '''yolos_ti''' else 512
    lowerCAmelCase_ = YolosImageProcessor(format='''coco_detection''', size=_A)
    lowerCAmelCase_ = image_processor(images=prepare_img(), return_tensors='''pt''')
    lowerCAmelCase_ = model(**_A)
    lowerCAmelCase_, lowerCAmelCase_ = outputs.logits, outputs.pred_boxes

    lowerCAmelCase_, lowerCAmelCase_ = None, None
    if yolos_name == "yolos_ti":
        lowerCAmelCase_ = torch.tensor(
            [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]])
        lowerCAmelCase_ = torch.tensor(
            [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]])
    elif yolos_name == "yolos_s_200_pre":
        lowerCAmelCase_ = torch.tensor(
            [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]])
        lowerCAmelCase_ = torch.tensor(
            [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]])
    elif yolos_name == "yolos_s_300_pre":
        lowerCAmelCase_ = torch.tensor(
            [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]])
        lowerCAmelCase_ = torch.tensor(
            [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]])
    elif yolos_name == "yolos_s_dWr":
        lowerCAmelCase_ = torch.tensor(
            [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]])
        lowerCAmelCase_ = torch.tensor(
            [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]])
    elif yolos_name == "yolos_base":
        lowerCAmelCase_ = torch.tensor(
            [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]])
        lowerCAmelCase_ = torch.tensor(
            [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]])
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], _A, atol=1E-4)
    assert torch.allclose(pred_boxes[0, :3, :3], _A, atol=1E-4)

    Path(_A).mkdir(exist_ok=_A)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(_A)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(_A)

    if push_to_hub:
        lowerCAmelCase_ = {
            '''yolos_ti''': '''yolos-tiny''',
            '''yolos_s_200_pre''': '''yolos-small''',
            '''yolos_s_300_pre''': '''yolos-small-300''',
            '''yolos_s_dWr''': '''yolos-small-dwr''',
            '''yolos_base''': '''yolos-base''',
        }

        print('''Pushing to the hub...''')
        lowerCAmelCase_ = model_mapping[yolos_name]
        image_processor.push_to_hub(_A, organization='''hustvl''')
        model.push_to_hub(_A, organization='''hustvl''')


if __name__ == "__main__":
    _A = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--yolos_name''',
        default='''yolos_s_200_pre''',
        type=str,
        help=(
            '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
            ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
        ),
    )
    parser.add_argument(
        '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )

    _A = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
278
0
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps,
    embedding_dim,
    freq_shift=1,
    min_timescale=1,
    max_timescale=1.0e4,
    flip_sin_to_cos=False,
    scale=1.0,
):
    """Return transformer-style sinusoidal position/timestep embeddings.

    Args:
        timesteps: 1-D array of timestep values, one per batch element.
        embedding_dim: size of the output embedding; must be even (half sin, half cos).
        freq_shift: offset subtracted from the timescale count when computing the
            log-spaced frequencies (1 reproduces the original Transformer schedule).
        min_timescale / max_timescale: bounds of the geometric frequency progression.
        flip_sin_to_cos: if True emit [cos | sin] instead of the default [sin | cos].
        scale: multiplier applied to the raw angle matrix before taking sin/cos.

    Returns:
        Array of shape (len(timesteps), embedding_dim).
    """
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    # Log-spaced frequencies between min_timescale and max_timescale.
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    # Outer product: (batch, 1) * (1, num_timescales) -> (batch, num_timescales).
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    emb = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(emb), jnp.sin(emb)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(emb), jnp.cos(emb)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    """Two-layer MLP (Dense -> SiLU -> Dense) that projects a timestep embedding."""

    # Output width of both dense layers.
    time_embed_dim: int = 32
    # Parameter/computation dtype.
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wraps :func:`get_sinusoidal_embeddings` as a flax module."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
296
def max_product_subarray(numbers):
    """Return the maximum product over all contiguous subarrays of ``numbers``.

    Uses the classic running max/min technique: a negative element swaps the
    roles of the best and worst products ending at the previous position.

    Args:
        numbers: list or tuple of ints (may be empty).

    Returns:
        The maximum contiguous-subarray product, or 0 for an empty input.

    Raises:
        ValueError: if ``numbers`` is not a list/tuple of ints.
    """
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            # A negative factor turns the smallest product into the largest.
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod


# Backward-compatible alias for the original (name-mangled) entry point.
__UpperCamelCase = max_product_subarray
278
0
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() _lowercase : Union[str, Any] = logging.get_logger(__name__) _lowercase : Optional[int] = [ ('bert.bert', 'visual_bert'), ('bert.cls', 'cls'), ('bert.classifier', 'cls'), ('token_type_embeddings_visual', 'visual_token_type_embeddings'), ('position_embeddings_visual', 'visual_position_embeddings'), ('projection', 'visual_projection'), ] _lowercase : List[Any] = [ 'nlvr2_coco_pre_trained.th', 'nlvr2_fine_tuned.th', 'nlvr2_pre_trained.th', 'vcr_coco_pre_train.th', 'vcr_fine_tune.th', 'vcr_pre_train.th', 'vqa_coco_pre_trained.th', 'vqa_fine_tuned.th', 'vqa_pre_trained.th', ] def lowercase__ ( snake_case_ :Tuple ): __UpperCAmelCase = torch.load(_A , map_location='''cpu''' ) return sd def lowercase__ ( snake_case_ :List[str] , snake_case_ :Any , snake_case_ :Optional[int]=rename_keys_prefix ): __UpperCAmelCase = OrderedDict() __UpperCAmelCase = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue __UpperCAmelCase = key for name_pair in rename_keys_prefix: __UpperCAmelCase = new_key.replace(name_pair[0] , name_pair[1] ) __UpperCAmelCase = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately __UpperCAmelCase = new_d['''cls.predictions.bias'''] return new_d @torch.no_grad() def lowercase__ ( snake_case_ :List[str] , snake_case_ :Optional[Any] ): assert ( checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in checkpoint_path: 
__UpperCAmelCase = '''pretraining''' if "vcr" in checkpoint_path: __UpperCAmelCase = {'''visual_embedding_dim''': 512} elif "vqa_advanced" in checkpoint_path: __UpperCAmelCase = {'''visual_embedding_dim''': 2_048} elif "vqa" in checkpoint_path: __UpperCAmelCase = {'''visual_embedding_dim''': 2_048} elif "nlvr" in checkpoint_path: __UpperCAmelCase = {'''visual_embedding_dim''': 1_024} else: raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' ) else: if "vcr" in checkpoint_path: __UpperCAmelCase = {'''visual_embedding_dim''': 512} __UpperCAmelCase = '''multichoice''' elif "vqa_advanced" in checkpoint_path: __UpperCAmelCase = {'''visual_embedding_dim''': 2_048} __UpperCAmelCase = '''vqa_advanced''' elif "vqa" in checkpoint_path: __UpperCAmelCase = {'''visual_embedding_dim''': 2_048, '''num_labels''': 3_129} __UpperCAmelCase = '''vqa''' elif "nlvr" in checkpoint_path: __UpperCAmelCase = { '''visual_embedding_dim''': 1_024, '''num_labels''': 2, } __UpperCAmelCase = '''nlvr''' __UpperCAmelCase = VisualBertConfig(**_A ) # Load State Dict __UpperCAmelCase = load_state_dict(_A ) __UpperCAmelCase = get_new_dict(_A , _A ) if model_type == "pretraining": __UpperCAmelCase = VisualBertForPreTraining(_A ) elif model_type == "vqa": __UpperCAmelCase = VisualBertForQuestionAnswering(_A ) elif model_type == "nlvr": __UpperCAmelCase = VisualBertForVisualReasoning(_A ) elif model_type == "multichoice": __UpperCAmelCase = VisualBertForMultipleChoice(_A ) model.load_state_dict(_A ) # Save Checkpoints Path(_A ).mkdir(exist_ok=_A ) model.save_pretrained(_A ) if __name__ == "__main__": _lowercase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.') parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.') _lowercase : List[str] = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, 
args.pytorch_dump_folder_path)
332
"""Convert original FocalNet classification checkpoints to the HF format."""
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms

from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def get_focalnet_config(model_name):
    """Build a FocalNetConfig from the conventions encoded in ``model_name``."""
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config


def rename_key(name):
    """Map an original state-dict key name to the HF key name."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name


def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Download an original checkpoint, convert it, verify outputs and save it."""
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
278
0
"""simple docstring""" import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @require_torch def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowerCAmelCase_ : Optional[int] = pipeline( task='zero-shot-audio-classification' , model='hf-internal-testing/tiny-clap-htsat-unfused' ) lowerCAmelCase_ : Optional[int] = load_dataset('ashraq/esc50' ) lowerCAmelCase_ : str = dataset['train']['audio'][-1]['array'] lowerCAmelCase_ : Optional[int] = audio_classifier(UpperCamelCase__ , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , [{'score': 0.5_01, 'label': 'Sound of a dog'}, {'score': 0.4_99, 'label': 'Sound of vaccum cleaner'}] , ) @unittest.skip('No models are available in TF' ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): pass @slow @require_torch def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowerCAmelCase_ : Optional[int] = pipeline( task='zero-shot-audio-classification' , model='laion/clap-htsat-unfused' , ) # This is an audio of a dog lowerCAmelCase_ : List[Any] = load_dataset('ashraq/esc50' ) lowerCAmelCase_ : List[Any] = dataset['train']['audio'][-1]['array'] lowerCAmelCase_ : List[Any] = audio_classifier(UpperCamelCase__ , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , [ {'score': 0.9_99, 'label': 'Sound of a dog'}, {'score': 0.0_01, 'label': 'Sound of vaccum cleaner'}, ] , ) lowerCAmelCase_ : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , [ [ {'score': 0.9_99, 'label': 'Sound of a dog'}, {'score': 0.0_01, 'label': 'Sound of vaccum cleaner'}, ], ] * 5 , ) 
lowerCAmelCase_ : Optional[int] = audio_classifier( [audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] , batch_size=5 ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , [ [ {'score': 0.9_99, 'label': 'Sound of a dog'}, {'score': 0.0_01, 'label': 'Sound of vaccum cleaner'}, ], ] * 5 , ) @unittest.skip('No models are available in TF' ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): pass
224
"""Convert a fairseq XGLM checkpoint to the Hugging Face format."""
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no HF equivalent (in place)."""
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing its weight with an embedding table."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq checkpoint from disk and return the converted HF model."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    # fairseq uses a `decoder.` prefix where HF uses `model.`.
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    # strict=False: the lm_head is rebuilt below from the embedding weights.
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
278
0
"""Processor class that bundles the TVLT image processor and feature extractor."""
from ...processing_utils import ProcessorMixin


class TvltProcessor(ProcessorMixin):
    """Wraps a TvltImageProcessor and a TvltFeatureExtractor into one callable
    that forwards image inputs to the former and audio inputs to the latter."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        """Process images and/or audio; at least one of the two must be given.

        Returns a dict merging the feature-extractor output (audio), the image
        processor output (images) and, if given, the mixed-images output.
        """
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        # Union of both sub-processors' input names, order-preserving, de-duplicated.
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
319
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

import json
import tempfile
from pathlib import Path

from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES


mname_tiny = "tiny-wmt19-en-ru"

# Build
# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
278
0
"""simple docstring""" import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class UpperCAmelCase (ctypes.Structure ): """simple docstring""" _UpperCAmelCase :int = [("size", ctypes.c_int), ("visible", ctypes.c_byte)] def SCREAMING_SNAKE_CASE__ ( ) -> Dict: if os.name == "nt": lowercase__: int = CursorInfo() lowercase__: Optional[int] = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(_A , ctypes.byref(_A ) ) lowercase__: Union[str, Any] = False ctypes.windll.kernelaa.SetConsoleCursorInfo(_A , ctypes.byref(_A ) ) elif os.name == "posix": sys.stdout.write('''\033[?25l''' ) sys.stdout.flush() def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]: if os.name == "nt": lowercase__: str = CursorInfo() lowercase__: str = ctypes.windll.kernelaa.GetStdHandle(-1_1 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(_A , ctypes.byref(_A ) ) lowercase__: Optional[int] = True ctypes.windll.kernelaa.SetConsoleCursorInfo(_A , ctypes.byref(_A ) ) elif os.name == "posix": sys.stdout.write('''\033[?25h''' ) sys.stdout.flush() @contextmanager def SCREAMING_SNAKE_CASE__ ( ) -> int: try: hide_cursor() yield finally: show_cursor()
177
"""Check (and optionally fix) the sorting of the Models section of the doc TOC."""
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Merge duplicate entries and sort a Models TOC section by title.

    Raises ValueError if one `local` key appears with several different titles.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    """Validate the model TOC, rewriting the file when ``overwrite`` is True."""
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
278
0
import shutil
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_tf_cross_test,
    require_tf,
    require_torch,
    require_torchvision,
    require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, SamImageProcessor, SamProcessor

if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


# NOTE(review): the original file named all three test classes `__snake_case`,
# so the later definitions shadowed the earlier ones and only the last class
# ever ran; methods also referenced the undefined name `UpperCamelCase__`.
# All classes and locals have been given distinct, meaningful names.


@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    """PyTorch-side tests for `SamProcessor`."""

    def setUp(self):
        # Fresh temp dir per test so save/load round-trips are isolated.
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list containing a single random PIL image."""
        # BUGFIX: the original used the non-existent dtype `np.uinta`;
        # 8-bit image data is `np.uint8`.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # Malformed masks must be rejected.
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size)
            )


@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    """TensorFlow-side tests for `SamProcessor`."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list containing a single random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(
            dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )


@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    """Cross-framework (PT vs. TF) equivalence tests for `SamProcessor`."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list containing a single random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        # BUGFIX: the original used the non-existent dtype `np.floataa` (float32).
        np_dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(np_dummy_masks)]
        pt_dummy_masks = [torch.tensor(np_dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
219
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class UnCLIPSchedulerTest(SchedulerCommonTest):
    """Scheduler tests for `UnCLIPScheduler`.

    BUGFIX: the original class derived from the undefined name
    `__UpperCAmelCase`; the imported `SchedulerCommonTest` is the intended
    base class.  All test methods were also named identically
    (shadowing each other) and have been given distinct `test_*` names
    so the test runner discovers every one of them.
    """

    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config, overridable via kwargs."""
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                # A previous timestep must strictly precede the current one.
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        # Not applicable to UnCLIPScheduler.
        pass

    def test_add_noise_device(self):
        # Not applicable to UnCLIPScheduler.
        pass
278
0
"""Speech processor class for Wav2Vec2."""
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    """Wraps a Wav2Vec2 feature extractor and a CTC tokenizer into a single processor.

    BUGFIX: the original class derived from the undefined name
    `__UpperCAmelCase` (intended: the imported `ProcessorMixin`) and its
    method bodies referenced the undefined name `UpperCamelCase__` instead
    of their actual parameters.
    """

    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Delegate calls to the feature extractor by default; the
        # `as_target_processor` context manager swaps in the tokenizer.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the processor; fall back to loading the two components separately
        for legacy checkpoints whose config lacks a `tokenizer_class`."""
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        """Forward `audio` to the feature extractor and/or `text` to the tokenizer.

        Returns the feature-extractor output, the tokenizer output, or the
        former with the tokenized ids attached under `labels` when both are given.
        """
        # For backwards compatibility: inside `as_target_processor` all calls
        # go straight to the tokenizer.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        """Pad `input_features` via the feature extractor and/or `labels` via the tokenizer."""
        # For backwards compatibility: see `__call__`.
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Temporarily route calls to the tokenizer for label processing (deprecated)."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
4
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


# NOTE(review): the original file defined five different functions all named
# `__UpperCamelCase` (each shadowing the previous) while the call sites used
# the proper names below — every call raised NameError.  The functions are
# restored under their intended names.


def prepare_img():
    """Download the standard COCO cats image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    """Return the first five reference logits for the given model variant."""
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])

    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])

    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])

    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]` in place."""
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    """Map original SwiftFormer state-dict keys to their HF-model equivalents.

    Returns:
        List of `(old_key, new_key)` tuples.
    """
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """Convert an original SwiftFormer checkpoint to the HF format and verify its outputs.

    Args:
        swiftformer_name: one of the known SwiftFormer variants.
        pytorch_dump_folder_path: directory in which to save the converted model.
        original_ckpt: path or URL of the original checkpoint; may be None.
    """
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]

    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]

    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]

    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
278
0
"""simple docstring""" from __future__ import annotations def lowercase ( __snake_case : Union[str, Any] ): lowercase_ : Dict = str(_A ) return len(_A ) == 9 and set(_A ) == set('''123456789''' ) def lowercase ( ): for base_num in range(9_9_9_9 , 4_9_9_9 , -1 ): lowercase_ : Any = 1_0_0_0_0_2 * base_num if is_9_pandigital(_A ): return candidate for base_num in range(3_3_3 , 9_9 , -1 ): lowercase_ : Optional[int] = 1_0_0_2_0_0_3 * base_num if is_9_pandigital(_A ): return candidate return None if __name__ == "__main__": print(F"""{solution() = }""")
33
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _A = logging.get_logger(__name__) _A = { '''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''', # See all ViT models at https://huggingface.co/models?filter=vit } class A ( __UpperCAmelCase ): __snake_case = 'vit' def __init__( self, UpperCamelCase__=768, UpperCamelCase__=12, UpperCamelCase__=12, UpperCamelCase__=3072, UpperCamelCase__="gelu", UpperCamelCase__=0.0, UpperCamelCase__=0.0, UpperCamelCase__=0.02, UpperCamelCase__=1E-12, UpperCamelCase__=224, UpperCamelCase__=16, UpperCamelCase__=3, UpperCamelCase__=True, UpperCamelCase__=16, **UpperCamelCase__, ): """simple docstring""" super().__init__(**UpperCamelCase__ ) lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_act lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = image_size lowerCAmelCase_ = patch_size lowerCAmelCase_ = num_channels lowerCAmelCase_ = qkv_bias lowerCAmelCase_ = encoder_stride class A ( __UpperCAmelCase ): __snake_case = version.parse('1.11' ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return 1E-4
278
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    """Configuration for a PEGASUS model.

    BUGFIX: the original class derived from the undefined name
    `__UpperCAmelCase` (intended: the imported `PretrainedConfig`) and
    forwarded undefined `UpperCamelCase__` names to `super().__init__`
    instead of its actual parameters.
    """

    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
73
"""Tests for `datasets` JSON reading (JsonDatasetReader) and writing (JsonDatasetWriter).

Fixtures `jsonl_path`, `jsonl_312_path`, `dataset`, `shared_datadir`, `tmp_path`,
and `tmp_path_factory` are provided by pytest / the test suite's conftest.
Original file defined every test under the same obfuscated name, so only the
last one survived; distinct names restore pytest collection of all tests.
"""
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_json_dataset(dataset, expected_features):
    """Shared assertions for a Dataset read from the 4-row / 3-column JSON fixture."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    # keep_in_memory=True must not touch the Arrow memory-mapped cache.
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # None means "infer"; the fixture's natural dtypes are the expectation then.
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    # Column order of the file (col_3 first) must be preserved.
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    # Requested feature order wins over the file's column order.
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    # Parenthesized: the original `x == split if split else "train"` was a no-op
    # when split is falsy (the assert value was the truthy string "train").
    assert dataset.split == (split if split else "train")


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict read from the 4-row / 3-column JSON fixture."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        # No split requested: read both and default the expectation to "train".
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    """Parse a whole-buffer JSON document (lines=False output)."""
    return json.load(buffer)


def load_json_lines(buffer):
    """Parse JSON-lines output, one document per line.

    Fixed: the original called json.loads on the (unnamed) buffer instead of
    on each line.
    """
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            # orient is only meaningful with lines=False (single JSON document).
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_invalidproc(self, dataset):
        # num_proc must be >= 1; NOTE(review): the raised type was obfuscated in
        # the original — ValueError matches the writer's validation; confirm.
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_json_file = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        # Round-trip through fsspec with inferred compression and compare bytes.
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_json_file, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
278
0
import cmath
import math


def apparent_power(
    voltage: float, current: float, voltage_angle: float, current_angle: float
) -> complex:
    """Return the apparent power S = V * I as a complex phasor product.

    Angles are given in degrees. The original definition used four identical
    (obfuscated) parameter names, which is a SyntaxError, so the function was
    unusable; meaningful names restore it.

    >>> apparent_power(100, 5, 0, 0)
    (500+0j)
    """
    # Convert the degree angles to radians for cmath.rect.
    voltage_angle_rad = math.radians(voltage_angle)
    current_angle_rad = math.radians(current_angle)
    # Convert voltage and current to rectangular (complex) form.
    voltage_rect = cmath.rect(voltage, voltage_angle_rad)
    current_rect = cmath.rect(current, current_angle_rad)
    # Calculate apparent power.
    return voltage_rect * current_rect


if __name__ == "__main__":
    import doctest

    doctest.testmod()
180
"""Scheduler base utilities: config name, scheduler enum, output dataclass, mixin.

The original file named all three classes ``A`` so later definitions shadowed
earlier ones, and referenced ``SCHEDULER_CONFIG_NAME`` while defining the
constant under an obfuscated name; distinct names restore the module.
"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union

import torch

from ..utils import BaseOutput


# Referenced by SchedulerMixin.config_name below.
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class KarrasDiffusionSchedulers(Enum):
    """Enumeration of the Karras-style diffusion schedulers (14 members).

    NOTE(review): member names were obfuscated in the source; these are
    reconstructed from diffusers' KarrasDiffusionSchedulers — confirm upstream.
    """

    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    """Base output of a scheduler step: the previous (denoised) sample."""

    # NOTE(review): the annotation was obfuscated to `= 42` in the source;
    # torch.FloatTensor matches the dataclass-field convention — confirm.
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    """Mixin providing config-driven save/load and compatibility discovery."""

    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Optional[str] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs: bool = False,
        **kwargs,
    ):
        """Load the scheduler config and instantiate the scheduler from it."""
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: str, push_to_hub: bool = False, **kwargs):
        """Write this scheduler's config to ``save_directory``."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Classes (from the top-level package) compatible with this scheduler."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        # Include the class itself; de-duplicate via a set.
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        # Resolve names against the top-level package (e.g. `diffusers`).
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
278
0
def greatest_common_divisor(a: int, b: int) -> int:
    """Return gcd(a, b) by the recursive Euclidean algorithm.

    The original def was named differently from the recursive call site
    (NameError) and had duplicate parameter names (SyntaxError); both fixed.
    """
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Return gcd(x, y) iteratively.

    Fixed: the original assigned both tuple targets to the same name, so x and
    y were never updated and the loop never terminated.
    """
    while y:  # when y == 0 the loop terminates and x is the gcd
        x, y = y, x % y
    return abs(x)


def main() -> None:
    """Read two comma-separated integers from stdin and print both gcds."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_a = int(nums[0])
        num_b = int(nums[1])
        print(
            f"greatest_common_divisor({num_a}, {num_b}) = "
            f"{greatest_common_divisor(num_a, num_b)}"
        )
        print(f"By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a, num_b)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
101
"""Tests for the Karras VE pipeline.

The original named both test classes ``A`` (the second shadowed the first) and
mangled ``UNet2DModel`` to ``UNetaDModel``; distinct, descriptive names restore
the test module.
"""
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        """A tiny, seeded UNet2DModel for fast deterministic tests."""
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # Run twice with the same seed: .images and return_dict=False tuple
        # output must agree.
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(
            num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
278
0
"""Project Euler 30: numbers equal to the sum of fifth powers of their digits.

The original defined both functions under the same obfuscated name (only the
last survived) while referencing ``digits_fifth_powers_sum`` and
``DIGITS_FIFTH_POWER``; the referenced names are restored.
"""

# Precomputed digit -> digit**5 lookup, keyed by the digit character.
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of ``number``."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Return the sum of all numbers below one million (and >= 1000) that equal
    the sum of the fifth powers of their digits."""
    return sum(
        number for number in range(1000, 1000000) if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
296
# Package init for the UnCLIP pipelines: exposes the real pipeline classes when
# torch and a recent transformers are installed, otherwise dummy placeholders.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    # UnCLIP requires both torch and transformers >= 4.25.0.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dependencies missing: export dummy objects that raise a helpful error
    # when instantiated, keeping `import` itself safe.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    # Dependencies present: export the real implementations.
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
278
0
"""simple docstring""" import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _UpperCAmelCase ( __UpperCAmelCase ): def a ( self : List[str] ): __UpperCAmelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCamelCase__ , '''embed_dim''' ) ) self.parent.assertTrue(hasattr(UpperCamelCase__ , '''num_heads''' ) ) class _UpperCAmelCase : def __init__( self : Optional[int] , _lowercase : int , _lowercase : List[str]=13 , _lowercase : Tuple=64 , _lowercase : str=3 , _lowercase : int=[16, 48, 96] , _lowercase : List[Any]=[1, 3, 6] , _lowercase : Tuple=[1, 2, 10] , _lowercase : str=[7, 3, 3] , _lowercase : List[Any]=[4, 2, 2] , _lowercase : Any=[2, 1, 1] , _lowercase : Tuple=[2, 2, 2] , _lowercase : int=[False, False, True] , _lowercase : Union[str, Any]=[0.0, 0.0, 0.0] , _lowercase : Union[str, Any]=0.02 , _lowercase : Union[str, Any]=1E-12 , _lowercase : Optional[int]=True , _lowercase : Union[str, Any]=True , _lowercase : Tuple=2 , ): __UpperCAmelCase = parent __UpperCAmelCase = batch_size __UpperCAmelCase = image_size __UpperCAmelCase = patch_sizes __UpperCAmelCase = patch_stride __UpperCAmelCase = patch_padding __UpperCAmelCase = is_training __UpperCAmelCase = use_labels __UpperCAmelCase = num_labels __UpperCAmelCase = num_channels __UpperCAmelCase = embed_dim 
__UpperCAmelCase = num_heads __UpperCAmelCase = stride_kv __UpperCAmelCase = depth __UpperCAmelCase = cls_token __UpperCAmelCase = attention_drop_rate __UpperCAmelCase = initializer_range __UpperCAmelCase = layer_norm_eps def a ( self : Optional[int] ): __UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase = None if self.use_labels: __UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) __UpperCAmelCase = self.get_config() return config, pixel_values, labels def a ( self : Union[str, Any] ): return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def a ( self : Tuple , _lowercase : Optional[Any] , _lowercase : Optional[int] , _lowercase : str ): __UpperCAmelCase = CvtModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __UpperCAmelCase = model(UpperCamelCase__ ) __UpperCAmelCase = (self.image_size, self.image_size) __UpperCAmelCase , __UpperCAmelCase = image_size[0], image_size[1] for i in range(len(self.depth ) ): __UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) __UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def a ( self : Union[str, Any] , _lowercase : Dict , _lowercase : Dict , _lowercase : Optional[int] ): __UpperCAmelCase = self.num_labels __UpperCAmelCase = CvtForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() 
__UpperCAmelCase = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a ( self : Dict ): __UpperCAmelCase = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = config_and_inputs __UpperCAmelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): a__ : List[str] = (CvtModel, CvtForImageClassification) if is_torch_available() else () a__ : Optional[Any] = ( {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification} if is_torch_available() else {} ) a__ : Union[str, Any] = False a__ : Tuple = False a__ : Optional[int] = False a__ : List[str] = False a__ : Dict = False def a ( self : Any ): __UpperCAmelCase = CvtModelTester(self ) __UpperCAmelCase = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 ) def a ( self : Any ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a ( self : Optional[int] ): return @unittest.skip(reason='''Cvt does not output attentions''' ) def a ( self : Union[str, Any] ): pass @unittest.skip(reason='''Cvt does not use inputs_embeds''' ) def a ( self : Optional[int] ): pass @unittest.skip(reason='''Cvt does not support input and output embeddings''' ) def a ( self : str ): pass def a ( self : List[str] ): __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase = 
model_class(UpperCamelCase__ ) __UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase = [*signature.parameters.keys()] __UpperCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def a ( self : Optional[Any] ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def a ( self : int ): def check_hidden_states_output(_lowercase : Dict , _lowercase : Union[str, Any] , _lowercase : Optional[int] ): __UpperCAmelCase = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() with torch.no_grad(): __UpperCAmelCase = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) __UpperCAmelCase = outputs.hidden_states __UpperCAmelCase = len(self.model_tester.depth ) self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def a ( self : List[str] ): __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def a ( self : Any ): pass @slow def a ( self : str ): for model_name in 
CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase = CvtModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def lowercase__ ( ): __UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class _UpperCAmelCase ( unittest.TestCase ): @cached_property def a ( self : str ): return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def a ( self : Optional[Any] ): __UpperCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(UpperCamelCase__ ) __UpperCAmelCase = self.default_image_processor __UpperCAmelCase = prepare_img() __UpperCAmelCase = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): __UpperCAmelCase = model(**UpperCamelCase__ ) # verify the logits __UpperCAmelCase = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) __UpperCAmelCase = torch.tensor([0.9_285, 0.9_015, -0.3_150] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
332
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3):
    """Build and simulate a QFT circuit on ``number_of_qubits`` qubits.

    Returns the measurement counts from 10000 shots on the qasm simulator.
    Fixed: the original type check was ``isinstance(_A, _A)`` — the second
    argument was a value, not a type, so the call itself raised TypeError for
    every input; the check now rejects strings as intended.
    """
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(number_of_qubits):
        # Hadamard on the current highest remaining qubit, then controlled
        # phase rotations against all lower qubits.
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # Reverse the qubit order, as the QFT outputs in bit-reversed order.
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)
    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
    )
278
0
"""simple docstring""" def UpperCamelCase_ ( lowerCAmelCase__ : List[str] ) -> Tuple: """simple docstring""" lowerCAmelCase_ : str = [int(_A ) for i in ip_va_address.split('.' ) if i.isdigit()] return len(_A ) == 4 and all(0 <= int(_A ) <= 254 for octet in octets ) if __name__ == "__main__": lowercase__ : Optional[int] = input().strip() lowercase__ : Any = """valid""" if is_ip_va_address_valid(ip) else """invalid""" print(f'{ip} is a {valid_or_invalid} IP v4 address.')
224
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """Return num! (memoized via lru_cache).

    Fixed: the original def was named differently from its recursive call
    site, so any input >= 2 raised NameError.

    Raises:
        ValueError: if ``num`` is negative.
    """
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
278
0
"""Count trailing zero bits in the binary representation of an integer."""
from math import log2


def binary_count_trailing_zeros(a: int) -> int:
    """Return the number of trailing zeros in ``bin(a)``; 0 for a == 0.

    Fixed: the original imported the non-existent ``math.loga`` (mangled
    ``log2``, ImportError on import) and its type check ``isinstance(_A, _A)``
    passed a value as the type argument, raising TypeError for every input.

    Raises:
        ValueError: if ``a`` is negative.
        TypeError: if ``a`` is a float (non-negative floats only; negatives
            hit the ValueError branch first, preserving the original order).
    """
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    # a & -a isolates the lowest set bit; its log2 is the trailing-zero count.
    return 0 if (a == 0) else int(log2(a & -a))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
319
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation


def get_upernet_config(model_name):
    """Build a UperNetConfig (Swin backbone, ADE20k labels) for *model_name*."""
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config


def create_rename_keys(config):
    """Return (old_name, new_name) pairs mapping mmseg parameter names to HF names."""
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"))

        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))

    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys


def rename_key(dct, old, new):
    """Move the value stored under *old* to *new* in *dct*."""
    val = dct.pop(old)
    dct[new] = val


def read_in_q_k_v(state_dict, backbone_config):
    """Split each fused qkv projection into separate query/key/value entries."""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on


def correct_unfold_reduction_order(x):
    """Reorder a patch-merging reduction weight from mmseg to HF column order."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    """Inverse of :func:`correct_unfold_reduction_order`."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    """Reorder a patch-merging norm vector from mmseg to HF order."""
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    """Inverse of :func:`correct_unfold_norm_order`."""
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Download an mmsegmentation UperNet+Swin checkpoint, convert it to HF
    format, verify the logits on a sample image, and optionally save/push it."""
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
278
0
"""simple docstring""" # This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES __A = "tiny-wmt19-en-ru" # Build # borrowed from a test __A = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] __A = dict(zip(vocab, range(len(vocab)))) __A = ["l o 123", "lo w 1456", "e r</w> 1789", ""] with tempfile.TemporaryDirectory() as tmpdirname: __A = Path(tmpdirname) __A = build_dir / VOCAB_FILES_NAMES["src_vocab_file"] __A = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"] __A = build_dir / VOCAB_FILES_NAMES["merges_file"] with open(src_vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, "w") as fp: fp.write("\n".join(merges)) __A = FSMTTokenizer( langs=["en", "ru"], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) __A = FSMTConfig( langs=["ru", "en"], src_vocab_size=1_0_0_0, tgt_vocab_size=1_0_0_0, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) __A = 
FSMTForConditionalGeneration(config) print(f'''num of params {tiny_model.num_parameters()}''') # Test __A = tokenizer(["Making tiny model"], return_tensors="pt") __A = tiny_model(**batch) print("test output:", len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(f'''Generated {mname_tiny}''') # Upload # transformers-cli upload tiny-wmt19-en-ru
177
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER for *result* and write them (and optionally all
    predictions/targets) to text files named after the dataset."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """Normalize a target transcription: strip ignored punctuation, lower-case,
    and collapse whitespace/newline sequences to single spaces."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    """Run the ASR pipeline over the requested dataset split and log metrics."""
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
278
0
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

# Register SEW's fairseq modules
from sew_asapp import tasks  # noqa: F401

from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter-name fragment -> HF parameter name ("*" is the layer index)
MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy *value* into the attribute of *hf_pointer* addressed by dotted *key*,
    asserting the shapes match before assignment."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Walk the fairseq state dict and copy every recognized tensor into *hf_model*."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # the fine-tuned model nests the backbone under "sew." (except lm_head)
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one feature-extractor conv/norm tensor into the HF feature extractor."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def convert_config(model, is_finetuned):
    """Derive a SEWConfig from the fairseq model's config object."""
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config


@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak a fairseq SEW checkpoint's weights into the transformers design."""
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
219
from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): every module-level binding below had been mangled to the single
# name ``_A``, so the names read elsewhere in this module (CLS, SEP, BOS, MASK,
# PAD, RESERVED, SPECIAL_CODEPOINTS, UNICODE_VOCAB_SIZE) raised NameError.
# The names are restored from the dict keys below and the usages in the
# tokenizer class that follows.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
# NOTE(review): this binding was mangled to ``_A``; restored to a descriptive
# name (nothing in this chunk reads the old ``_A`` binding).
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class A(PreTrainedTokenizer):
    """
    Construct a CANINE-style tokenizer (i.e. a character splitter): text is
    split into individual characters and each character is mapped to its
    Unicode codepoint.

    NOTE(review): the original was heavily name-mangled — ``class A`` derived
    from an undefined ``__UpperCAmelCase``, ``__init__`` reused one parameter
    name for every argument (a SyntaxError) with each default being ``chr()``
    of itself, and every method carried the same mangled name.  The public
    class name ``A`` is kept; the base is restored to ``PreTrainedTokenizer``
    (imported above), parameter names are forced by the references in the
    ``__init__`` body, and method names are the standard tokenizer hooks the
    bodies clearly implement.  Default codepoints mirror SPECIAL_CODEPOINTS
    (CLS/SEP doubling as bos/eos follows the upstream CANINE tokenizer —
    confirm).
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(0xE000),
        eos_token=chr(0xE001),
        sep_token=chr(0xE001),
        cls_token=chr(0xE000),
        pad_token=chr(0),
        mask_token=chr(0xE003),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        """Size of the (virtual) vocabulary: every Unicode codepoint."""
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string by splitting it into individual characters."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """A token is a single character; its id is its Unicode codepoint."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """Map a codepoint back to a character (special ids to their names)."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        """Characters concatenate directly back into the original string."""
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """0 for the first sequence (incl. its [CLS]/[SEP]), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """There is no vocabulary file to save for this tokenizer."""
        return ()
278
0
"""Tests for VisionTextDualEncoderProcessor.

NOTE(review): the original was name-mangled — every method was named
``__UpperCAmelCase`` (so later defs shadowed earlier ones and unittest
discovered no tests) and every local was rebound to ``lowerCAmelCase`` while
bodies read the real names (``vocab_tokens``, ``model_input_names`` ...).
Method and local names below are reconstructed from the bodies and the
upstream processor test — confirm against
``tests/models/vision_text_dual_encoder/test_processor_vision_text_dual_encoder.py``.
"""
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class UpperCAmelCase_(unittest.TestCase):
    def setUp(self):
        """Create a temp dir holding a minimal BERT vocab and a ViT image-processor config."""
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list containing one random PIL image (channels moved last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
4
def solution(n: int = 1000000) -> int:
    """
    Project Euler 14: return the starting number below ``n`` whose Collatz
    chain (n -> n/2 if even, 3n+1 if odd, until reaching 1) is the longest.

    Chain lengths are memoised in ``counters`` so each number's tail is only
    walked once.

    NOTE(review): the original was name-mangled — every local was bound to
    ``lowerCAmelCase_`` while the body read ``counters``/``pre_counter``/
    ``largest_number`` (NameError at runtime), and the function was defined as
    ``__UpperCamelCase`` but invoked as ``solution`` in the ``__main__`` block.

    Args:
        n: exclusive upper bound for the starting number.

    Returns:
        The start value in ``[1, n)`` producing the longest chain.
    """
    largest_number = 1
    pre_counter = 1  # length of the best chain found so far
    counters = {1: 1}  # memo: start value -> chain length
    for start in range(2, n):
        counter = 0
        number = start
        while True:
            if number in counters:
                # Reached a number whose chain length is already known.
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if start not in counters:
            counters[start] = counter
        if counter > pre_counter:
            largest_number = start
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
278
0
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): __A : Dict = '''pt''' elif is_tf_available(): __A : List[Any] = '''tf''' else: __A : Dict = '''jax''' class _UpperCAmelCase ( __UpperCAmelCase , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : int = PerceiverTokenizer SCREAMING_SNAKE_CASE_ : Any = False def A ( self : Tuple ) -> Dict: super().setUp() lowercase_ : List[Any] = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def A ( self : int ) -> Dict: return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' ) def A ( self : List[Any] , **A : List[Any] ) -> Optional[Any]: return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def A ( self : int , A : Union[str, Any] , A : List[Any]=False , A : Optional[int]=20 , A : List[str]=5 ) -> Optional[Any]: lowercase_ : Optional[Any] = [] for i in range(len(UpperCamelCase__ ) ): try: lowercase_ : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase__ ) except UnicodeDecodeError: pass toks.append((i, tok) ) lowercase_ : Optional[int] = list(filter(lambda A : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , UpperCamelCase__ ) ) lowercase_ : str = list(filter(lambda A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase__ ) , UpperCamelCase__ ) ) if max_length is not None and len(UpperCamelCase__ ) > max_length: lowercase_ : Optional[Any] = toks[:max_length] if min_length is not None and len(UpperCamelCase__ ) < min_length and len(UpperCamelCase__ ) > 0: while len(UpperCamelCase__ ) < min_length: lowercase_ : Optional[int] = toks + toks # toks_str = [t[1] for t in toks] lowercase_ : Optional[Any] = [t[0] for 
t in toks] # Ensure consistency lowercase_ : Union[str, Any] = tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ ) if " " not in output_txt and len(UpperCamelCase__ ) > 1: lowercase_ : str = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase__ ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase__ ) ) if with_prefix_space: lowercase_ : Union[str, Any] = ''' ''' + output_txt lowercase_ : Any = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) return output_txt, output_ids def A ( self : Any ) -> Dict: lowercase_ : Dict = self.perceiver_tokenizer lowercase_ : Tuple = '''Unicode €.''' lowercase_ : List[Any] = tokenizer(UpperCamelCase__ ) lowercase_ : Union[str, Any] = [4, 91, 1_16, 1_11, 1_05, 1_17, 1_06, 1_07, 38, 2_32, 1_36, 1_78, 52, 5] self.assertEqual(encoded['''input_ids'''] , UpperCamelCase__ ) # decoding lowercase_ : List[str] = tokenizer.decode(UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , '''[CLS]Unicode €.[SEP]''' ) lowercase_ : List[str] = tokenizer('''e è é ê ë''' ) lowercase_ : Optional[int] = [4, 1_07, 38, 2_01, 1_74, 38, 2_01, 1_75, 38, 2_01, 1_76, 38, 2_01, 1_77, 5] self.assertEqual(encoded['''input_ids'''] , UpperCamelCase__ ) # decoding lowercase_ : List[Any] = tokenizer.decode(UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , '''[CLS]e è é ê ë[SEP]''' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' ) def A ( self : Optional[int] ) -> List[Any]: lowercase_ : Optional[Any] = self.perceiver_tokenizer lowercase_ : Union[str, Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] # fmt: off lowercase_ : str = [4, 71, 38, 1_14, 1_17, 1_16, 1_09, 38, 1_18, 1_03, 1_20, 1_03, 1_09, 1_20, 1_03, 1_18, 1_10, 38, 1_08, 1_17, 1_20, 38, 1_21, 1_23, 1_15, 1_15, 1_03, 1_20, 1_11, 1_28, 
1_03, 1_22, 1_11, 1_17, 1_16, 52, 5, 0] # fmt: on lowercase_ : str = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) if FRAMEWORK != "jax": lowercase_ : str = list(batch.input_ids.numpy()[0] ) else: lowercase_ : Dict = list(batch.input_ids.tolist()[0] ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def A ( self : Optional[Any] ) -> Optional[Any]: lowercase_ : Any = self.perceiver_tokenizer lowercase_ : int = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] lowercase_ : Any = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) # check if input_ids are returned and no decoder_input_ids self.assertIn('''input_ids''' , UpperCamelCase__ ) self.assertIn('''attention_mask''' , UpperCamelCase__ ) self.assertNotIn('''decoder_input_ids''' , UpperCamelCase__ ) self.assertNotIn('''decoder_attention_mask''' , UpperCamelCase__ ) def A ( self : Optional[int] ) -> Optional[Any]: lowercase_ : Optional[int] = self.perceiver_tokenizer lowercase_ : Optional[int] = [ '''Summary of the text.''', '''Another summary.''', ] lowercase_ : str = tokenizer( text_target=UpperCamelCase__ , max_length=32 , padding='''max_length''' , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) def A ( self : Any ) -> Tuple: lowercase_ : Tuple = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test lowercase_ : Any = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc lowercase_ : 
List[str] = tempfile.mkdtemp() lowercase_ : Optional[Any] = ''' He is very happy, UNwant\u00E9d,running''' lowercase_ : Dict = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) tokenizer.save_pretrained(UpperCamelCase__ ) lowercase_ : int = tokenizer.__class__.from_pretrained(UpperCamelCase__ ) lowercase_ : Dict = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) shutil.rmtree(UpperCamelCase__ ) lowercase_ : str = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc lowercase_ : int = tempfile.mkdtemp() lowercase_ : Tuple = ''' He is very happy, UNwant\u00E9d,running''' tokenizer.add_tokens(['''bim''', '''bambam'''] ) lowercase_ : Optional[Any] = tokenizer.additional_special_tokens additional_special_tokens.append('''new_additional_special_token''' ) tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} ) lowercase_ : Dict = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) tokenizer.save_pretrained(UpperCamelCase__ ) lowercase_ : Union[str, Any] = tokenizer.__class__.from_pretrained(UpperCamelCase__ ) lowercase_ : Any = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) lowercase_ : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase__ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(UpperCamelCase__ ) def A ( self : Any ) -> List[Any]: lowercase_ : List[str] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) 
if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__ , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file: lowercase_ : List[Any] = json.load(UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__ , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file: lowercase_ : Optional[int] = json.load(UpperCamelCase__ ) lowercase_ : int = [F'''<extra_id_{i}>''' for i in range(1_25 )] lowercase_ : Any = added_tokens_extra_ids + [ '''an_additional_special_token''' ] lowercase_ : List[str] = added_tokens_extra_ids + [ '''an_additional_special_token''' ] with open(os.path.join(UpperCamelCase__ , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(UpperCamelCase__ , UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__ , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(UpperCamelCase__ , UpperCamelCase__ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files lowercase_ : List[Any] = tokenizer_class.from_pretrained( UpperCamelCase__ , ) self.assertIn( '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained lowercase_ : Optional[Any] = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=UpperCamelCase__ )] lowercase_ : Optional[int] = tokenizer_class.from_pretrained( UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , ) self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens ) self.assertEqual( ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , ) def A ( self : int ) -> int: lowercase_ : Union[str, Any] = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([1_78] ) , '''�''' ) def A ( self : List[Any] ) -> Tuple: pass def A ( self : Optional[int] ) -> Union[str, Any]: pass def A ( self : Optional[int] ) -> List[str]: pass def A ( self : Any ) -> Optional[Any]: pass def A ( self : Optional[Any] ) -> Any: lowercase_ : int = self.get_tokenizers(fast=UpperCamelCase__ , do_lower_case=UpperCamelCase__ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): lowercase_ : List[Any] = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]'''] lowercase_ : Optional[Any] = tokenizer.convert_tokens_to_string(UpperCamelCase__ ) 
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
33
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class A(unittest.TestCase):
    """Integration test for DiT document-image classification (RVL-CDIP).

    NOTE(review): locals and the test-method name were mangled
    (``lowerCAmelCase_`` rebound for every value, ``torch_device`` replaced by
    ``UpperCamelCase__``, method named ``SCREAMING_SNAKE_CASE__``); restored
    below.  The method name follows the upstream DiT integration test — confirm.
    """

    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # RVL-CDIP has 16 document classes.
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
278
0
from ..utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
)


# PyTorch schedulers — replaced by dummy objects when torch is not installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler

# Flax schedulers — replaced by dummy objects when flax is not installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )

# Schedulers that additionally require scipy.
try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler

# Schedulers that additionally require torchsde.
try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
73
"""Convert a T5X Pix2Struct checkpoint to the Hugging Face format.

Fixed: the three helpers all collided on one mangled name while the entry
point was called under a different name; every local shadowed a single
variable (``get_flax_param`` returned an unbound name); the imports referenced
non-existent modules/classes (``tax``, ``PixaStruct*``); and the main guard
read ``args.tax_checkpoint_path`` although argparse defines
``--t5x_checkpoint_path`` (AttributeError).
"""
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    """Load a T5X checkpoint and return its parameters as a flat dict."""
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    """Rename flattened T5X parameter keys to HF names and convert values to torch tensors."""
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first ("target") prefix from the tuple key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format: weight matrices are transposed,
    # embedding tables are kept as-is
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict


def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    """Convert a T5X checkpoint into an HF Pix2Struct model + processor and save both to disk."""
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        # NOTE(review): the mangled source only showed bare `= 4096` / `= True`;
        # large checkpoints set more patches and the VQA flag — confirm attribute names.
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
    args = parser.parse_args()

    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
278
0
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lazy import structure for the EfficientNet model family.

Fixed: the import-structure dict and the optional additions were all assigned
to one variable (clobbering the dict), and `_import_structure` was referenced
but never defined, so importing this package raised NameError. The
`sys.modules[__name__] = ...` assignment that installs the lazy proxy was
also missing.
"""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
180
import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor _A = logging.get_logger(__name__) class A ( __UpperCAmelCase ): def __init__( self, *UpperCamelCase__, **UpperCamelCase__ ): """simple docstring""" warnings.warn( '''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use BeitImageProcessor instead.''', UpperCamelCase__, ) super().__init__(*UpperCamelCase__, **UpperCamelCase__ )
278
0
import numpy as np from transformers import Pipeline def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' lowercase = np.max(_A , axis=-1 , keepdims=_A ) lowercase = np.exp(outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_A ) class lowercase ( __UpperCAmelCase ): def A__ ( self ,**A__): lowercase = {} if "second_text" in kwargs: lowercase = kwargs['''second_text'''] return preprocess_kwargs, {}, {} def A__ ( self ,A__ ,A__=None): return self.tokenizer(UpperCamelCase__ ,text_pair=UpperCamelCase__ ,return_tensors=self.framework) def A__ ( self ,A__): return self.model(**UpperCamelCase__) def A__ ( self ,A__): lowercase = model_outputs.logits[0].numpy() lowercase = softmax(UpperCamelCase__) lowercase = np.argmax(UpperCamelCase__) lowercase = self.model.config.idalabel[best_class] lowercase = probabilities[best_class].item() lowercase = logits.tolist() return {"label": label, "score": score, "logits": logits}
101
"""Convert original YOLOS checkpoints (hustvl/YOLOS) to the Hugging Face format.

Fixed: every local previously reused a single mangled variable name (so config
fields were never set and renamed state-dict keys were silently dropped), and
all helper functions collided on one name; the intended names were restored
from the surviving call sites (`get_yolos_config`, `convert_state_dict`,
`prepare_img`, `rename_key`, `convert_yolos_checkpoint`).
"""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_yolos_config(yolos_name):
    """Build a YolosConfig for the requested model variant, with COCO detection labels."""
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    # set label information (91 COCO detection classes)
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused timm qkv projection into separate query/key/value tensors (in place)."""
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        # NOTE(review): the target key names were lost in the mangled source; the prefix below
        # mirrors convert_state_dict() — confirm against modeling_yolos parameter naming.
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(name):
    """Map an original YOLOS state-dict key to the HF naming scheme."""
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name


def convert_state_dict(orig_state_dict, model):
    """Rename all keys of the original state dict, splitting fused qkv projections."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_img():
    """Download the standard COCO cats validation image used for output checks."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original checkpoint, verify predictions on a test image, save, and optionally push."""
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
278
0
import numpy as np def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' return 1 / (1 + np.exp(-vector )) def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' return vector * sigmoid(_A ) if __name__ == "__main__": import doctest doctest.testmod()
296
def __UpperCamelCase ( _A ): if not numbers: return 0 if not isinstance(_A , (list, tuple) ) or not all( isinstance(_A , _A ) for number in numbers ): raise ValueError('''numbers must be an iterable of integers''' ) lowerCAmelCase_ = lowerCAmelCase_ = lowerCAmelCase_ = numbers[0] for i in range(1 , len(_A ) ): # update the maximum and minimum subarray products lowerCAmelCase_ = numbers[i] if number < 0: lowerCAmelCase_ , lowerCAmelCase_ = min_till_now, max_till_now lowerCAmelCase_ = max(_A , max_till_now * number ) lowerCAmelCase_ = min(_A , min_till_now * number ) # update the maximum product found till now lowerCAmelCase_ = max(_A , _A ) return max_prod
278
0
"""Tests for OneFormerImageProcessor.

Fixed: both classes were defined under the same mangled name while the test
class referenced `OneFormerImageProcessorTester` (undefined), the test-class
base was an undefined name, `self.` had been stripped from every attribute
assignment in `__init__`, and the post-processing tests used the undefined
name `fature_extractor`.
"""
import json
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from transformers import OneFormerImageProcessor
    from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
    from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput

if is_vision_available():
    from PIL import Image


def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    """Download a class-info JSON from the hub and build the metadata dict the processor expects."""
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata


class OneFormerImageProcessorTester(unittest.TestCase):
    """Holds the shared configuration and helpers used by the image-processor tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4

        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height/width the processor should produce (shortest-edge resize, max over batch)."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width

    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )


@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        """Build processor inputs, optionally with random segmentation maps (numpy or PIL)."""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs

    def test_init_without_params(self):
        pass

    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")

    def test_binary_mask_to_rle(self):
        # NOTE(review): the mask slice assignments were lost in the mangled source and are
        # reconstructed to match the asserted RLE (start=21, run=45, 4 entries).
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)

    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
        self.assertEqual(segmentation[0].shape, target_sizes[0])

    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )

    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
332
"""Convert original FocalNet checkpoints (from the Microsoft release) to the
Hugging Face format.

Heavy third-party imports (torch, requests, PIL, torchvision, transformers,
huggingface_hub) are done lazily inside the functions that need them so that
the pure helpers (e.g. ``rename_key``) can be imported without them.
"""
import argparse
import json


def get_focalnet_config(model_name):
    """Build a ``FocalNetConfig`` matching the given checkpoint name.

    Args:
        model_name: Checkpoint identifier such as ``"focalnet-tiny-lrf"``;
            size ("tiny"/"small"/"base"/"large"/"xlarge"/"huge") and variant
            ("lrf", "fl3", "fl4") substrings select the hyper-parameters.

    Returns:
        A configured ``FocalNetConfig`` with id2label/label2id filled in from
        the matching ImageNet label file on the Hub.
    """
    from huggingface_hub import hf_hub_download

    from transformers import FocalNetConfig

    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config


def rename_key(name):
    """Map an original FocalNet state-dict key to its HF-model key."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        # everything except the classification head lives under the `focalnet.` prefix
        name = "focalnet." + name
    return name


def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Download an original checkpoint, convert it, verify it, and optionally save/push.

    Args:
        model_name: One of the keys of ``model_name_to_url`` below.
        pytorch_dump_folder_path: Directory to save the converted model/processor
            to, or ``None`` to skip saving.
        push_to_hub: Whether to push the converted model/processor to the Hub.
    """
    import requests
    import torch
    from PIL import Image
    from torchvision import transforms

    from transformers import BitImageProcessor, FocalNetForImageClassification
    from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling

    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion against the reference torchvision preprocessing
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    expected_slice = None  # reference logits only exist for the checkpoints below
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    if expected_slice is not None:
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
278
0
"""simple docstring""" import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class UpperCamelCase__ ( __UpperCAmelCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = ["""image_processor""", """tokenizer"""] _SCREAMING_SNAKE_CASE = """LayoutLMv2ImageProcessor""" _SCREAMING_SNAKE_CASE = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""") def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : Tuple=None , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ): if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , UpperCamelCase__ , ) lowerCAmelCase_ : str = kwargs.pop('feature_extractor' ) lowerCAmelCase_ : Union[str, Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' 
) super().__init__(UpperCamelCase__ , UpperCamelCase__ ) def __call__( self : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] = None , SCREAMING_SNAKE_CASE_ : List[str] = None , SCREAMING_SNAKE_CASE_ : Any = None , SCREAMING_SNAKE_CASE_ : List[str] = None , SCREAMING_SNAKE_CASE_ : Dict = True , SCREAMING_SNAKE_CASE_ : List[str] = False , SCREAMING_SNAKE_CASE_ : List[str] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Dict = 0 , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Tuple = None , SCREAMING_SNAKE_CASE_ : Tuple = None , SCREAMING_SNAKE_CASE_ : Optional[Any] = False , SCREAMING_SNAKE_CASE_ : str = False , SCREAMING_SNAKE_CASE_ : str = False , SCREAMING_SNAKE_CASE_ : Any = False , SCREAMING_SNAKE_CASE_ : Dict = True , SCREAMING_SNAKE_CASE_ : int = None , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ): if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes ' 'if you initialized the image processor with apply_ocr set to True.' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' 
) # first, apply the image processor lowerCAmelCase_ : Tuple = self.image_processor(images=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(UpperCamelCase__ , UpperCamelCase__ ): lowerCAmelCase_ : Union[str, Any] = [text] # add batch dimension (as the image processor always adds a batch dimension) lowerCAmelCase_ : Optional[Any] = features['words'] lowerCAmelCase_ : Optional[Any] = self.tokenizer( text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , ) # add pixel values lowerCAmelCase_ : List[Any] = features.pop('pixel_values' ) if return_overflowing_tokens is True: lowerCAmelCase_ : Dict = self.get_overflowing_images(UpperCamelCase__ , encoded_inputs['overflow_to_sample_mapping'] ) lowerCAmelCase_ : Union[str, Any] = images return encoded_inputs def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple ): lowerCAmelCase_ : Any = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(UpperCamelCase__ ) != len(UpperCamelCase__ ): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' F" {len(UpperCamelCase__ )} and 
{len(UpperCamelCase__ )}" ) return images_with_overflow def SCREAMING_SNAKE_CASE__ ( self : List[Any] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Tuple ): return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Tuple ): return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ ) @property def SCREAMING_SNAKE_CASE__ ( self : int ): return ["input_ids", "bbox", "attention_mask", "image"] @property def SCREAMING_SNAKE_CASE__ ( self : Dict ): warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCamelCase__ , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCamelCase__ , ) return self.image_processor
224
"""Convert a fairseq XGLM checkpoint (model.pt) into the Hugging Face format."""
import argparse
from argparse import Namespace

import torch
from torch import nn


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no HF equivalent (in place).

    Missing keys are ignored (``pop`` with a default), so this is safe on
    partially stripped checkpoints.
    """
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free linear layer whose weight is shared with `emb`.

    Used to tie the LM head to the token embedding matrix.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # share storage with the embedding so the weights stay tied
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq checkpoint from `checkpoint_path` and return an `XGLMForCausalLM`."""
    # imported lazily so the pure helpers above work without transformers installed
    from transformers import XGLMConfig, XGLMForCausalLM

    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    # fairseq prefixes everything with `decoder.`; HF uses `model.`
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
278
0
"""Testing suite for the PyTorch YOLOS model."""
import inspect
import unittest

from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import YolosForObjectDetection, YolosModel
    from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class YolosModelTester:
    """Builds a tiny YolosConfig plus random inputs and runs shape checks
    shared by the test cases below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for the object-detection head: it expects a list of dicts as labels
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
319
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

import json
import tempfile
from pathlib import Path

from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES

mname_tiny = "tiny-wmt19-en-ru"

# Build

# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

# write the tiny vocab/merges files into a temp dir so the tokenizer can load them
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
278
0
"""Transformer building blocks: the basic transformer block together with the
feed-forward, activation and adaptive-normalization layers it is assembled from."""
from typing import Any, Dict, Optional

import torch
import torch.nn.functional as F
from torch import nn

from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings


@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    """A basic transformer block: self-attention, optional cross-attention, feed-forward.

    Each of the three sub-blocks has its own normalization layer, which may be a plain
    LayerNorm or an adaptive (timestep/class conditioned) variant selected by `norm_type`.
    """

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout: float = 0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None (no feed-forward chunking)
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        """Enable chunked feed-forward to trade speed for memory."""
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        timestep=None,
        cross_attention_kwargs=None,
        class_labels=None,
    ):
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}

        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states


class FeedForward(nn.Module):
    """A feed-forward layer: projection+activation, dropout, projection (optional final dropout)."""

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states


class GELU(nn.Module):
    """GELU activation preceded by a linear projection; supports tanh approximation."""

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states


class GEGLU(nn.Module):
    """Gated GELU: projects to twice the width, gates one half with GELU of the other."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)


class ApproximateGELU(nn.Module):
    """Sigmoid-based approximation of GELU: x * sigmoid(1.702 * x)."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)


class AdaLayerNorm(nn.Module):
    """LayerNorm whose scale/shift are predicted from a timestep embedding."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x


class AdaLayerNormZero(nn.Module):
    """Adaptive LayerNorm (zero-init style) conditioned on timestep and class labels.

    Returns the normalized input plus the gate/shift/scale chunks consumed by the
    self-attention and feed-forward paths of `BasicTransformerBlock`.
    """

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp


class AdaGroupNorm(nn.Module):
    """GroupNorm whose scale/shift are predicted from a conditioning embedding."""

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        # broadcast scale/shift over the spatial dimensions
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
177
import argparse
from collections import defaultdict

# Path of the documentation table of contents this script checks/fixes.
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Deduplicate and alphabetically sort the model-doc entries of the ToC.

    Args:
        model_doc: list of ``{"local": ..., "title": ...}`` dicts.

    Returns:
        A new list with duplicate ``local`` keys merged (kept once) and entries
        sorted case-insensitively by title.

    Raises:
        ValueError: if the same ``local`` key appears with different titles.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort (case-insensitive by title)
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    """Check (and optionally fix in place) the model-doc section of the ToC file.

    Raises:
        ValueError: if the ToC is out of order and ``overwrite`` is False.
    """
    # Imported here so the pure helpers above stay importable without PyYAML installed.
    import yaml

    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Extract the modalities and clean them one by one.
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
278
0
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_json_dataset(dataset, expected_features):
    """Shared assertions for a Dataset read from the 4-row / 3-column JSON fixture."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    # the 312 fixture has its columns in col_3, col_1, col_2 order and 2 rows
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_231_path, tmp_path):
    # NOTE(review): fixture name reconstructed from the expected col_2/col_3/col_1
    # column order below — confirm against conftest.py.
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_231_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict read from the JSON fixture."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    """Parse a whole-file JSON document from a binary buffer."""
    return json.load(buffer)


def load_json_lines(buffer):
    """Parse one JSON document per line (JSON Lines) from a binary buffer."""
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        # num_proc=0 is invalid and must raise before anything is written
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()

        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
219
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class UnCLIPSchedulerTest(SchedulerCommonTest):
    """Scheduler tests for UnCLIPScheduler.

    NOTE(review): class/method names were reconstructed; the base class is the
    imported SchedulerCommonTest (the only test base in scope), and the tuple
    attribute must be named `scheduler_classes` because the tests read
    `self.scheduler_classes[0]`.
    """

    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Default UnCLIP scheduler config, overridable via kwargs."""
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                # prev_timestep must come strictly before time_step
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        # not applicable to UnCLIPScheduler (NOTE(review): name assumed — the
        # original only shows an intentionally empty override)
        pass

    def test_add_noise_device(self):
        # not applicable to UnCLIPScheduler (NOTE(review): name assumed)
        pass
278
0
"""Project Euler-style problem: find the Nth prime number (default: 10001st)."""
import itertools
import math


def is_prime(number: int) -> bool:
    """Return True if `number` is prime.

    Uses trial division by 6k +/- 1 candidates up to sqrt(number).
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the prime numbers in increasing order, indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the `nth` prime number (1-indexed: solution(1) == 2)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
4
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _A = logging.get_logger(__name__) _A = torch.device('''cpu''') def __UpperCamelCase ( ): lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw ) return im def __UpperCamelCase ( _A ): if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = dct.pop(_A ) lowerCAmelCase_ = val def __UpperCamelCase ( _A ): lowerCAmelCase_ = [] for k in state_dict.keys(): lowerCAmelCase_ = k if ".pwconv" in k: lowerCAmelCase_ = k_new.replace('''.pwconv''' , '''.point_wise_conv''' ) if ".dwconv" in k: lowerCAmelCase_ = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' ) if ".Proj." 
in k: lowerCAmelCase_ = k_new.replace('''.Proj.''' , '''.proj.''' ) if "patch_embed" in k_new: lowerCAmelCase_ = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' ) if "network" in k_new: lowerCAmelCase_ = k_new.split('''.''' ) if ls[2].isdigit(): lowerCAmelCase_ = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] ) else: lowerCAmelCase_ = k_new.replace('''network''' , '''swiftformer.encoder.network''' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size lowerCAmelCase_ = 1000 lowerCAmelCase_ = '''huggingface/label-files''' lowerCAmelCase_ = '''imagenet-1k-id2label.json''' lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()} lowerCAmelCase_ = idalabel lowerCAmelCase_ = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": lowerCAmelCase_ = [3, 3, 6, 4] lowerCAmelCase_ = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": lowerCAmelCase_ = [3, 3, 9, 6] lowerCAmelCase_ = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": lowerCAmelCase_ = [4, 3, 10, 5] lowerCAmelCase_ = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": lowerCAmelCase_ = [4, 4, 12, 6] lowerCAmelCase_ = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('''https''' ): lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' , check_hash=_A ) else: lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' ) lowerCAmelCase_ = checkpoint lowerCAmelCase_ = create_rename_keys(_A ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_A , _A , _A ) # 
load HuggingFace model lowerCAmelCase_ = SwiftFormerForImageClassification(_A ).eval() hf_model.load_state_dict(_A ) # prepare test inputs lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = ViTImageProcessor.from_pretrained('''preprocessor_config''' ) lowerCAmelCase_ = processor(images=_A , return_tensors='''pt''' ) # compare outputs from both models lowerCAmelCase_ = get_expected_output(_A ) lowerCAmelCase_ = hf_model(inputs['''pixel_values'''] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5] , _A , atol=1E-3 ) Path(_A ).mkdir(exist_ok=_A ) print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" ) hf_model.save_pretrained(_A ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swiftformer_name''', default='''swiftformer_xs''', choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''], type=str, help='''Name of the SwiftFormer model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''./converted_outputs/''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''') _A = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
278
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __A : List[str] = logging.get_logger(__name__) __A : Tuple = { '''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''', } class _UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ): SCREAMING_SNAKE_CASE_ : Tuple = "focalnet" def __init__( self : str , A : Optional[int]=2_24 , A : Optional[int]=4 , A : Union[str, Any]=3 , A : List[str]=96 , A : Optional[Any]=False , A : Dict=[1_92, 3_84, 7_68, 7_68] , A : Union[str, Any]=[2, 2, 6, 2] , A : Optional[int]=[2, 2, 2, 2] , A : Any=[3, 3, 3, 3] , A : int="gelu" , A : Dict=4.0 , A : Optional[int]=0.0 , A : List[str]=0.1 , A : int=False , A : Tuple=1e-4 , A : Dict=False , A : Dict=False , A : Tuple=False , A : Dict=0.02 , A : Any=1e-5 , A : List[Any]=32 , A : Optional[int]=None , A : Any=None , **A : Optional[int] , ) -> Any: super().__init__(**UpperCamelCase__ ) lowercase_ : Optional[int] = image_size lowercase_ : Dict = patch_size lowercase_ : Dict = num_channels lowercase_ : int = embed_dim lowercase_ : Optional[int] = use_conv_embed lowercase_ : int = hidden_sizes lowercase_ : Tuple = depths lowercase_ : Tuple = focal_levels lowercase_ : Any = focal_windows lowercase_ : Any = hidden_act lowercase_ : List[str] = mlp_ratio lowercase_ : int = hidden_dropout_prob lowercase_ : Tuple = drop_path_rate lowercase_ : Optional[int] = use_layerscale lowercase_ : Dict = layerscale_value lowercase_ : Any = use_post_layernorm lowercase_ : Union[str, Any] = use_post_layernorm_in_modulation lowercase_ : int = normalize_modulator lowercase_ : Dict = initializer_range lowercase_ : List[str] = layer_norm_eps lowercase_ : List[str] = encoder_stride lowercase_ : Dict = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )] lowercase_ , lowercase_ : Dict = 
get_aligned_output_features_output_indices( out_features=UpperCamelCase__ , out_indices=UpperCamelCase__ , stage_names=self.stage_names )
33
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _A = logging.get_logger(__name__) _A = { '''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''', # See all ViT models at https://huggingface.co/models?filter=vit } class A ( __UpperCAmelCase ): __snake_case = 'vit' def __init__( self, UpperCamelCase__=768, UpperCamelCase__=12, UpperCamelCase__=12, UpperCamelCase__=3072, UpperCamelCase__="gelu", UpperCamelCase__=0.0, UpperCamelCase__=0.0, UpperCamelCase__=0.02, UpperCamelCase__=1E-12, UpperCamelCase__=224, UpperCamelCase__=16, UpperCamelCase__=3, UpperCamelCase__=True, UpperCamelCase__=16, **UpperCamelCase__, ): """simple docstring""" super().__init__(**UpperCamelCase__ ) lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_act lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = image_size lowerCAmelCase_ = patch_size lowerCAmelCase_ = num_channels lowerCAmelCase_ = qkv_bias lowerCAmelCase_ = encoder_stride class A ( __UpperCAmelCase ): __snake_case = version.parse('1.11' ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return 1E-4
278
0
import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A_ ( unittest.TestCase ): def __init__( self : int ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int=3 ,SCREAMING_SNAKE_CASE__ : List[str]=3_2 ,SCREAMING_SNAKE_CASE__ : Dict=3 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 ,SCREAMING_SNAKE_CASE__ : List[Any]=[1_0, 2_0, 3_0, 4_0] ,SCREAMING_SNAKE_CASE__ : Any=[1, 1, 2, 1] ,SCREAMING_SNAKE_CASE__ : List[Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Tuple="relu" ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=None ,): __lowerCamelCase : int = parent __lowerCamelCase : Union[str, Any] = batch_size __lowerCamelCase : Optional[int] = image_size __lowerCamelCase : int = num_channels __lowerCamelCase : str = embeddings_size __lowerCamelCase : Optional[Any] = hidden_sizes __lowerCamelCase : Optional[int] = depths __lowerCamelCase : Dict = is_training __lowerCamelCase : Any = use_labels __lowerCamelCase : Optional[int] = hidden_act __lowerCamelCase : Union[str, Any] = num_labels __lowerCamelCase : Optional[Any] = scope __lowerCamelCase : Tuple = len(UpperCamelCase__) def lowerCAmelCase ( self : Any): __lowerCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __lowerCamelCase : Optional[Any] = self.get_config() return config, pixel_values def lowerCAmelCase ( self : Union[str, Any]): return 
RegNetConfig( num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,) def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Optional[int]): __lowerCamelCase : Tuple = FlaxRegNetModel(config=UpperCamelCase__) __lowerCamelCase : List[Any] = model(UpperCamelCase__) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) ,) def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Any): __lowerCamelCase : str = self.num_labels __lowerCamelCase : Tuple = FlaxRegNetForImageClassification(config=UpperCamelCase__) __lowerCamelCase : Optional[Any] = model(UpperCamelCase__) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels)) def lowerCAmelCase ( self : int): __lowerCamelCase : str = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase : Tuple = config_and_inputs __lowerCamelCase : Tuple = {'pixel_values': pixel_values} return config, inputs_dict @require_flax class A_ ( __UpperCAmelCase , unittest.TestCase ): _UpperCAmelCase : Union[str, Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () _UpperCAmelCase : Tuple = False _UpperCAmelCase : str = False _UpperCAmelCase : List[Any] = False def lowerCAmelCase ( self : Union[str, Any]): __lowerCamelCase : Tuple = FlaxRegNetModelTester(self) __lowerCamelCase : Optional[int] = ConfigTester(self ,config_class=UpperCamelCase__ ,has_text_modality=UpperCamelCase__) def lowerCAmelCase ( self : int): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() 
self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase ( self : Tuple): return def lowerCAmelCase ( self : Optional[int]): __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__) def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__) @unittest.skip(reason='RegNet does not use inputs_embeds') def lowerCAmelCase ( self : Any): pass @unittest.skip(reason='RegNet does not support input and output embeddings') def lowerCAmelCase ( self : Any): pass def lowerCAmelCase ( self : str): __lowerCamelCase , __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : int = model_class(UpperCamelCase__) __lowerCamelCase : int = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase : Tuple = [*signature.parameters.keys()] __lowerCamelCase : Any = ['pixel_values'] self.assertListEqual(arg_names[:1] ,UpperCamelCase__) def lowerCAmelCase ( self : Union[str, Any]): def check_hidden_states_output(SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Dict): __lowerCamelCase : List[str] = model_class(UpperCamelCase__) __lowerCamelCase : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase__ ,UpperCamelCase__)) __lowerCamelCase : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __lowerCamelCase : Optional[int] = self.model_tester.num_stages 
self.assertEqual(len(UpperCamelCase__) ,expected_num_stages + 1) __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : List[Any] = True check_hidden_states_output(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase : str = True check_hidden_states_output(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__) def lowerCAmelCase ( self : Optional[int]): __lowerCamelCase , __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): __lowerCamelCase : Optional[int] = self._prepare_for_class(UpperCamelCase__ ,UpperCamelCase__) __lowerCamelCase : Union[str, Any] = model_class(UpperCamelCase__) @jax.jit def model_jitted(SCREAMING_SNAKE_CASE__ : List[str] ,**SCREAMING_SNAKE_CASE__ : int): return model(pixel_values=UpperCamelCase__ ,**UpperCamelCase__) with self.subTest('JIT Enabled'): __lowerCamelCase : Union[str, Any] = model_jitted(**UpperCamelCase__).to_tuple() with self.subTest('JIT Disabled'): with jax.disable_jit(): __lowerCamelCase : str = model_jitted(**UpperCamelCase__).to_tuple() self.assertEqual(len(UpperCamelCase__) ,len(UpperCamelCase__)) for jitted_output, output in zip(UpperCamelCase__ ,UpperCamelCase__): self.assertEqual(jitted_output.shape ,output.shape) def SCREAMING_SNAKE_CASE__ ( ) -> Tuple: __lowerCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_flax class A_ ( unittest.TestCase ): @cached_property def lowerCAmelCase ( self : Union[str, Any]): return AutoImageProcessor.from_pretrained('facebook/regnet-y-040') if is_vision_available() else None @slow def lowerCAmelCase ( self : int): __lowerCamelCase : int = 
FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040') __lowerCamelCase : List[str] = self.default_image_processor __lowerCamelCase : List[str] = prepare_img() __lowerCamelCase : List[Any] = image_processor(images=UpperCamelCase__ ,return_tensors='np') __lowerCamelCase : int = model(**UpperCamelCase__) # verify the logits __lowerCamelCase : Optional[Any] = (1, 1_0_0_0) self.assertEqual(outputs.logits.shape ,UpperCamelCase__) __lowerCamelCase : Dict = jnp.array([-0.4180, -1.5051, -3.4836]) self.assertTrue(jnp.allclose(outputs.logits[0, :3] ,UpperCamelCase__ ,atol=1E-4))
73
import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __UpperCamelCase ( _A , _A ): assert isinstance(_A , _A ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A , keep_in_memory=_A ).read() _check_json_dataset(_A , _A ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase_ = features.copy() if features else default_expected_features lowerCAmelCase_ = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase_ = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() _check_json_dataset(_A , _A ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_3''': '''float64''', 
'''col_1''': '''string''', '''col_2''': '''int64'''}, ] , ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''} lowerCAmelCase_ = features.copy() if features else default_expected_features lowerCAmelCase_ = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase_ = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() assert isinstance(_A , _A ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def __UpperCamelCase ( _A , _A ): # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} lowerCAmelCase_ = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''} lowerCAmelCase_ = features.copy() lowerCAmelCase_ = ( Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read() assert isinstance(_A , _A ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A , split=_A ).read() _check_json_dataset(_A , _A ) assert dataset.split == split if split else "train" 
@pytest.mark.parametrize('''path_type''' , [str, list] ) def __UpperCamelCase ( _A , _A , _A ): if issubclass(_A , _A ): lowerCAmelCase_ = jsonl_path elif issubclass(_A , _A ): lowerCAmelCase_ = [jsonl_path] lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A ).read() _check_json_dataset(_A , _A ) def __UpperCamelCase ( _A , _A , _A=("train",) ): assert isinstance(_A , _A ) for split in splits: lowerCAmelCase_ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase_ = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=_A , keep_in_memory=_A ).read() _check_json_datasetdict(_A , _A ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def __UpperCamelCase ( _A , _A , _A ): lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase_ = features.copy() if features else default_expected_features lowerCAmelCase_ = ( Features({feature: Value(_A ) for 
feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase_ = JsonDatasetReader({'''train''': jsonl_path} , features=_A , cache_dir=_A ).read() _check_json_datasetdict(_A , _A ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __UpperCamelCase ( _A , _A , _A ): if split: lowerCAmelCase_ = {split: jsonl_path} else: lowerCAmelCase_ = '''train''' lowerCAmelCase_ = {'''train''': jsonl_path, '''test''': jsonl_path} lowerCAmelCase_ = tmp_path / '''cache''' lowerCAmelCase_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase_ = JsonDatasetReader(_A , cache_dir=_A ).read() _check_json_datasetdict(_A , _A , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __UpperCamelCase ( _A ): return json.load(_A ) def __UpperCamelCase ( _A ): return [json.loads(_A ) for line in buffer] class A : @pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)] ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__ ).write() buffer.seek(0 ) lowerCAmelCase_ = load_json_function(UpperCamelCase__ ) assert isinstance(UpperCamelCase__, UpperCamelCase__ ) assert isinstance(exported_content[0], UpperCamelCase__ ) assert len(UpperCamelCase__ ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''', [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ], ) def 
SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__, orient=UpperCamelCase__ ).write() buffer.seek(0 ) lowerCAmelCase_ = load_json(UpperCamelCase__ ) assert isinstance(UpperCamelCase__, UpperCamelCase__ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase__, '''keys''' ) and not hasattr(exported_content[0], '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase__ ) == 10 @pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)] ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, lines=UpperCamelCase__, num_proc=2 ).write() buffer.seek(0 ) lowerCAmelCase_ = load_json_function(UpperCamelCase__ ) assert isinstance(UpperCamelCase__, UpperCamelCase__ ) assert isinstance(exported_content[0], UpperCamelCase__ ) assert len(UpperCamelCase__ ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''', [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ], ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, 
lines=UpperCamelCase__, orient=UpperCamelCase__, num_proc=2 ).write() buffer.seek(0 ) lowerCAmelCase_ = load_json(UpperCamelCase__ ) assert isinstance(UpperCamelCase__, UpperCamelCase__ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase__, '''keys''' ) and not hasattr(exported_content[0], '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase__ ) == 10 def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" with pytest.raises(UpperCamelCase__ ): with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, num_proc=0 ) @pytest.mark.parametrize('''compression, extension''', [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = tmp_path_factory.mktemp('''data''' ) / f"test.json.{extension}" lowerCAmelCase_ = str(shared_datadir / f"test_file.json.{extension}" ) JsonDatasetWriter(UpperCamelCase__, UpperCamelCase__, compression=UpperCamelCase__ ).write() with fsspec.open(UpperCamelCase__, '''rb''', compression='''infer''' ) as f: lowerCAmelCase_ = f.read() with fsspec.open(UpperCamelCase__, '''rb''', compression='''infer''' ) as f: lowerCAmelCase_ = f.read() assert exported_content == original_content
278
0
import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def snake_case ( *snake_case__ :Optional[Any]) -> Any: if not isinstance(_A , _A): _A = list(_A) for i in range(len(_A)): _A = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def snake_case ( snake_case__ :Any) -> Tuple: _A = [ """CUDA out of memory.""", # CUDA OOM """cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU """DefaultCPUAllocator: can\'t allocate memory""", # CPU OOM ] if isinstance(_A , _A) and len(exception.args) == 1: return any(err in exception.args[0] for err in _statements) return False def snake_case ( snake_case__ :str = None , snake_case__ :Optional[Any] = 128) -> Tuple: if function is None: return functools.partial(_A , starting_batch_size=_A) _A = starting_batch_size def decorator(*snake_case__ :Any , **snake_case__ :str): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() _A = list(inspect.signature(_A).parameters.keys()) # Guard against user error if len(_A) < (len(_A) + 1): _A = """, """.join([F'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:])]) raise TypeError( F'''Batch size was passed into `{function.__name__}` as the first argument when called.''' F'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''') while True: if batch_size == 0: raise RuntimeError("""No executable batch size found, reached zero.""") try: return function(_A , *_A , **_A) except Exception as e: if should_reduce_batch_size(_A): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
180
import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput _A = '''scheduler_config.json''' class A ( __UpperCAmelCase ): __snake_case = 1 __snake_case = 2 __snake_case = 3 __snake_case = 4 __snake_case = 5 __snake_case = 6 __snake_case = 7 __snake_case = 8 __snake_case = 9 __snake_case = 10 __snake_case = 11 __snake_case = 12 __snake_case = 13 __snake_case = 14 @dataclass class A ( __UpperCAmelCase ): __snake_case = 42 class A : __snake_case = SCHEDULER_CONFIG_NAME __snake_case = [] __snake_case = True @classmethod def SCREAMING_SNAKE_CASE__ ( cls, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__=False, **UpperCamelCase__, ): """simple docstring""" lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = cls.load_config( pretrained_model_name_or_path=UpperCamelCase__, subfolder=UpperCamelCase__, return_unused_kwargs=UpperCamelCase__, return_commit_hash=UpperCamelCase__, **UpperCamelCase__, ) return cls.from_config(UpperCamelCase__, return_unused_kwargs=UpperCamelCase__, **UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = False, **UpperCamelCase__ ): """simple docstring""" self.save_config(save_directory=UpperCamelCase__, push_to_hub=UpperCamelCase__, **UpperCamelCase__ ) @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return self._get_compatibles() @classmethod def SCREAMING_SNAKE_CASE__ ( cls ): """simple docstring""" lowerCAmelCase_ = list(set([cls.__name__] + cls._compatibles ) ) lowerCAmelCase_ = importlib.import_module(__name__.split('''.''' )[0] ) lowerCAmelCase_ = [ getattr(UpperCamelCase__, UpperCamelCase__ ) for c in compatible_classes_str if hasattr(UpperCamelCase__, UpperCamelCase__ ) ] return compatible_classes
278
0
import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class lowercase ( unittest.TestCase ): def A__ ( self ,A__): lowercase = 3 lowercase = 2_5_0 lowercase = ids_tensor((batch_size, length) ,UpperCamelCase__) lowercase = torch.ones((batch_size, length) ,device=UpperCamelCase__ ,dtype=torch.float) / length return input_ids, scores def A__ ( self): lowercase , lowercase = self._get_tensors(5) lowercase = StoppingCriteriaList( [ MaxLengthCriteria(max_length=1_0), MaxTimeCriteria(max_time=0.1), ]) self.assertFalse(criteria(UpperCamelCase__ ,UpperCamelCase__)) lowercase , lowercase = self._get_tensors(9) self.assertFalse(criteria(UpperCamelCase__ ,UpperCamelCase__)) lowercase , lowercase = self._get_tensors(1_0) self.assertTrue(criteria(UpperCamelCase__ ,UpperCamelCase__)) def A__ ( self): lowercase = MaxLengthCriteria(max_length=1_0) lowercase , lowercase = self._get_tensors(5) self.assertFalse(criteria(UpperCamelCase__ ,UpperCamelCase__)) lowercase , lowercase = self._get_tensors(9) self.assertFalse(criteria(UpperCamelCase__ ,UpperCamelCase__)) lowercase , lowercase = self._get_tensors(1_0) self.assertTrue(criteria(UpperCamelCase__ ,UpperCamelCase__)) def A__ ( self): lowercase = MaxNewTokensCriteria(start_length=5 ,max_new_tokens=5) lowercase , lowercase = self._get_tensors(5) self.assertFalse(criteria(UpperCamelCase__ ,UpperCamelCase__)) lowercase , lowercase = self._get_tensors(9) self.assertFalse(criteria(UpperCamelCase__ ,UpperCamelCase__)) lowercase , lowercase = self._get_tensors(1_0) self.assertTrue(criteria(UpperCamelCase__ ,UpperCamelCase__)) lowercase = StoppingCriteriaList([criteria]) self.assertEqual(criteria_list.max_length 
,1_0) def A__ ( self): lowercase , lowercase = self._get_tensors(5) lowercase = MaxTimeCriteria(max_time=0.1) self.assertFalse(criteria(UpperCamelCase__ ,UpperCamelCase__)) lowercase = MaxTimeCriteria(max_time=0.1 ,initial_timestamp=time.time() - 0.2) self.assertTrue(criteria(UpperCamelCase__ ,UpperCamelCase__)) def A__ ( self): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0)]) ,1_0) with self.assertWarns(UpperCamelCase__): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0)]) ,1_1) lowercase = validate_stopping_criteria(StoppingCriteriaList() ,1_1) self.assertEqual(len(UpperCamelCase__) ,1)
101
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class A ( unittest.TestCase ): @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase_ = UNetaDModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''), ) return model def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = self.dummy_uncond_unet lowerCAmelCase_ = KarrasVeScheduler() lowerCAmelCase_ = KarrasVePipeline(unet=UpperCamelCase__, scheduler=UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pipe(num_inference_steps=2, generator=UpperCamelCase__, output_type='''numpy''' ).images lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pipe(num_inference_steps=2, generator=UpperCamelCase__, output_type='''numpy''', return_dict=UpperCamelCase__ )[0] lowerCAmelCase_ = image[0, -3:, -3:, -1] lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCAmelCase_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class A ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" lowerCAmelCase_ = '''google/ncsnpp-celebahq-256''' lowerCAmelCase_ = UNetaDModel.from_pretrained(UpperCamelCase__ ) lowerCAmelCase_ = KarrasVeScheduler() lowerCAmelCase_ = KarrasVePipeline(unet=UpperCamelCase__, scheduler=UpperCamelCase__ ) pipe.to(UpperCamelCase__ ) 
pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = pipe(num_inference_steps=20, generator=UpperCamelCase__, output_type='''numpy''' ).images lowerCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) lowerCAmelCase_ = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
278
0
from __future__ import annotations SCREAMING_SNAKE_CASE_ = [True] * 1_0_0_0_0_0_1 SCREAMING_SNAKE_CASE_ = 2 while i * i <= 1_0_0_0_0_0_0: if seive[i]: for j in range(i * i, 1_0_0_0_0_0_1, i): SCREAMING_SNAKE_CASE_ = False i += 1 def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: '''simple docstring''' return seive[n] def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' return any(digit in """02468""" for digit in str(_A ) ) def __lowercase ( _SCREAMING_SNAKE_CASE = 1_00_00_00 ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = [2] # result already includes the number 2. for num in range(3 , limit + 1 , 2 ): if is_prime(_A ) and not contains_an_even_digit(_A ): SCREAMING_SNAKE_CASE = str(_A ) SCREAMING_SNAKE_CASE = [int(str_num[j:] + str_num[:j] ) for j in range(len(_A ) )] if all(is_prime(_A ) for i in list_nums ): result.append(_A ) return result def __lowercase ( ) -> Dict: '''simple docstring''' return len(find_circular_primes() ) if __name__ == "__main__": print(F'''{len(find_circular_primes()) = }''')
296
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
278
0
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class _UpperCAmelCase : def __init__( self : Optional[Any] ): __UpperCAmelCase = {} def a ( self : List[str] , _lowercase : List[Any] , _lowercase : Dict , _lowercase : Optional[int]=1 ): if self.graph.get(UpperCamelCase__ ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: __UpperCAmelCase = [[w, v]] if not self.graph.get(UpperCamelCase__ ): __UpperCAmelCase = [] def a ( self : Optional[int] ): return list(self.graph ) def a ( self : Dict , _lowercase : List[str] , _lowercase : str ): if self.graph.get(UpperCamelCase__ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(UpperCamelCase__ ) def a ( self : Optional[int] , _lowercase : List[str]=-2 , _lowercase : Any=-1 ): if s == d: return [] __UpperCAmelCase = [] __UpperCAmelCase = [] if s == -2: __UpperCAmelCase = list(self.graph )[0] stack.append(UpperCamelCase__ ) visited.append(UpperCamelCase__ ) __UpperCAmelCase = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __UpperCAmelCase = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(UpperCamelCase__ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) __UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(UpperCamelCase__ ) != 0: __UpperCAmelCase = stack[len(UpperCamelCase__ ) - 1] else: __UpperCAmelCase = ss # check if se have reached the starting point if len(UpperCamelCase__ ) == 0: return visited def a ( self : str , _lowercase : str=-1 ): if c == -1: __UpperCAmelCase = floor(random() * 1_00_00 ) + 10 for i in range(UpperCamelCase__ ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): __UpperCAmelCase = floor(random() * c ) + 1 if n != i: self.add_pair(UpperCamelCase__ , UpperCamelCase__ , 1 ) def a ( self : Union[str, Any] , 
_lowercase : int=-2 ): __UpperCAmelCase = deque() __UpperCAmelCase = [] if s == -2: __UpperCAmelCase = list(self.graph )[0] d.append(UpperCamelCase__ ) visited.append(UpperCamelCase__ ) while d: __UpperCAmelCase = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def a ( self : List[str] , _lowercase : Tuple ): __UpperCAmelCase = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def a ( self : List[str] , _lowercase : Dict ): return len(self.graph[u] ) def a ( self : Union[str, Any] , _lowercase : Optional[Any]=-2 ): __UpperCAmelCase = [] __UpperCAmelCase = [] if s == -2: __UpperCAmelCase = list(self.graph )[0] stack.append(UpperCamelCase__ ) visited.append(UpperCamelCase__ ) __UpperCAmelCase = s __UpperCAmelCase = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __UpperCAmelCase = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(UpperCamelCase__ ) != 0: __UpperCAmelCase = stack[len(UpperCamelCase__ ) - 1] else: __UpperCAmelCase = ss # check if se have reached the starting point if len(UpperCamelCase__ ) == 0: return sorted_nodes def a ( self : Optional[Any] ): __UpperCAmelCase = [] __UpperCAmelCase = [] __UpperCAmelCase = list(self.graph )[0] stack.append(UpperCamelCase__ ) visited.append(UpperCamelCase__ ) __UpperCAmelCase = -2 __UpperCAmelCase = [] __UpperCAmelCase = s __UpperCAmelCase = False __UpperCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __UpperCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __UpperCAmelCase = 
len(UpperCamelCase__ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() __UpperCAmelCase = True if len(UpperCamelCase__ ) != 0: __UpperCAmelCase = stack[len(UpperCamelCase__ ) - 1] else: __UpperCAmelCase = False indirect_parents.append(UpperCamelCase__ ) __UpperCAmelCase = s __UpperCAmelCase = ss # check if se have reached the starting point if len(UpperCamelCase__ ) == 0: return list(UpperCamelCase__ ) def a ( self : List[Any] ): __UpperCAmelCase = [] __UpperCAmelCase = [] __UpperCAmelCase = list(self.graph )[0] stack.append(UpperCamelCase__ ) visited.append(UpperCamelCase__ ) __UpperCAmelCase = -2 __UpperCAmelCase = [] __UpperCAmelCase = s __UpperCAmelCase = False __UpperCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __UpperCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __UpperCAmelCase = len(UpperCamelCase__ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() __UpperCAmelCase = True if len(UpperCamelCase__ ) != 0: __UpperCAmelCase = stack[len(UpperCamelCase__ ) - 1] else: __UpperCAmelCase = False indirect_parents.append(UpperCamelCase__ ) __UpperCAmelCase = s __UpperCAmelCase = ss # check if se have reached the starting point if len(UpperCamelCase__ ) == 0: return False def a ( self : List[str] , _lowercase : int=-2 , _lowercase : str=-1 ): 
__UpperCAmelCase = time() self.dfs(UpperCamelCase__ , UpperCamelCase__ ) __UpperCAmelCase = time() return end - begin def a ( self : Optional[int] , _lowercase : List[Any]=-2 ): __UpperCAmelCase = time() self.bfs(UpperCamelCase__ ) __UpperCAmelCase = time() return end - begin class _UpperCAmelCase : def __init__( self : Dict ): __UpperCAmelCase = {} def a ( self : Union[str, Any] , _lowercase : Optional[int] , _lowercase : int , _lowercase : List[Any]=1 ): if self.graph.get(UpperCamelCase__ ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist __UpperCAmelCase = [[w, v]] # add the other way if self.graph.get(UpperCamelCase__ ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist __UpperCAmelCase = [[w, u]] def a ( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : Tuple ): if self.graph.get(UpperCamelCase__ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(UpperCamelCase__ ) # the other way round if self.graph.get(UpperCamelCase__ ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(UpperCamelCase__ ) def a ( self : Tuple , _lowercase : int=-2 , _lowercase : List[Any]=-1 ): if s == d: return [] __UpperCAmelCase = [] __UpperCAmelCase = [] if s == -2: __UpperCAmelCase = list(self.graph )[0] stack.append(UpperCamelCase__ ) visited.append(UpperCamelCase__ ) __UpperCAmelCase = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __UpperCAmelCase = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(UpperCamelCase__ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) __UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(UpperCamelCase__ ) != 0: __UpperCAmelCase = stack[len(UpperCamelCase__ ) - 1] else: __UpperCAmelCase = ss # check if se have reached 
the starting point if len(UpperCamelCase__ ) == 0: return visited def a ( self : List[Any] , _lowercase : List[str]=-1 ): if c == -1: __UpperCAmelCase = floor(random() * 1_00_00 ) + 10 for i in range(UpperCamelCase__ ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): __UpperCAmelCase = floor(random() * c ) + 1 if n != i: self.add_pair(UpperCamelCase__ , UpperCamelCase__ , 1 ) def a ( self : Union[str, Any] , _lowercase : Optional[Any]=-2 ): __UpperCAmelCase = deque() __UpperCAmelCase = [] if s == -2: __UpperCAmelCase = list(self.graph )[0] d.append(UpperCamelCase__ ) visited.append(UpperCamelCase__ ) while d: __UpperCAmelCase = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def a ( self : Dict , _lowercase : Optional[Any] ): return len(self.graph[u] ) def a ( self : Union[str, Any] ): __UpperCAmelCase = [] __UpperCAmelCase = [] __UpperCAmelCase = list(self.graph )[0] stack.append(UpperCamelCase__ ) visited.append(UpperCamelCase__ ) __UpperCAmelCase = -2 __UpperCAmelCase = [] __UpperCAmelCase = s __UpperCAmelCase = False __UpperCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __UpperCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __UpperCAmelCase = len(UpperCamelCase__ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() __UpperCAmelCase = True if len(UpperCamelCase__ ) != 0: __UpperCAmelCase = stack[len(UpperCamelCase__ ) - 1] else: __UpperCAmelCase = False 
indirect_parents.append(UpperCamelCase__ ) __UpperCAmelCase = s __UpperCAmelCase = ss # check if se have reached the starting point if len(UpperCamelCase__ ) == 0: return list(UpperCamelCase__ ) def a ( self : List[Any] ): __UpperCAmelCase = [] __UpperCAmelCase = [] __UpperCAmelCase = list(self.graph )[0] stack.append(UpperCamelCase__ ) visited.append(UpperCamelCase__ ) __UpperCAmelCase = -2 __UpperCAmelCase = [] __UpperCAmelCase = s __UpperCAmelCase = False __UpperCAmelCase = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __UpperCAmelCase = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __UpperCAmelCase = len(UpperCamelCase__ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __UpperCAmelCase = node[1] break # check if all the children are visited if s == ss: stack.pop() __UpperCAmelCase = True if len(UpperCamelCase__ ) != 0: __UpperCAmelCase = stack[len(UpperCamelCase__ ) - 1] else: __UpperCAmelCase = False indirect_parents.append(UpperCamelCase__ ) __UpperCAmelCase = s __UpperCAmelCase = ss # check if se have reached the starting point if len(UpperCamelCase__ ) == 0: return False def a ( self : str ): return list(self.graph ) def a ( self : str , _lowercase : Optional[Any]=-2 , _lowercase : Any=-1 ): __UpperCAmelCase = time() self.dfs(UpperCamelCase__ , UpperCamelCase__ ) __UpperCAmelCase = time() return end - begin def a ( self : Any , _lowercase : List[Any]=-2 ): __UpperCAmelCase = time() self.bfs(UpperCamelCase__ ) __UpperCAmelCase = time() return end - begin
332
import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def __UpperCamelCase ( _A = 3 ): if isinstance(_A , _A ): raise TypeError('''number of qubits must be a integer.''' ) if number_of_qubits <= 0: raise ValueError('''number of qubits must be > 0.''' ) if math.floor(_A ) != number_of_qubits: raise ValueError('''number of qubits must be exact integer.''' ) if number_of_qubits > 10: raise ValueError('''number of qubits too large to simulate(>10).''' ) lowerCAmelCase_ = QuantumRegister(_A , '''qr''' ) lowerCAmelCase_ = ClassicalRegister(_A , '''cr''' ) lowerCAmelCase_ = QuantumCircuit(_A , _A ) lowerCAmelCase_ = number_of_qubits for i in range(_A ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(_A ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , _A , _A ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(_A , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(_A , _A ) # simulate with 10000 shots lowerCAmelCase_ = Aer.get_backend('''qasm_simulator''' ) lowerCAmelCase_ = execute(_A , _A , shots=10000 ) return job.result().get_counts(_A ) if __name__ == "__main__": print( f"Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}" )
278
0
"""simple docstring""" import string def UpperCamelCase_ ( lowerCAmelCase__ : Tuple ) -> Any: """simple docstring""" lowerCAmelCase_ : List[str] = '' for i in sequence: lowerCAmelCase_ : Dict = ord(_A ) if 65 <= extract <= 90: output += chr(155 - extract ) elif 97 <= extract <= 122: output += chr(219 - extract ) else: output += i return output def UpperCamelCase_ ( lowerCAmelCase__ : List[str] ) -> Optional[int]: """simple docstring""" lowerCAmelCase_ : Optional[int] = string.ascii_letters lowerCAmelCase_ : Dict = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1] return "".join( letters_reversed[letters.index(_A )] if c in letters else c for c in sequence ) def UpperCamelCase_ ( ) -> int: """simple docstring""" from timeit import timeit print('Running performance benchmarks...' ) lowerCAmelCase_ : Optional[Any] = 'from string import printable ; from __main__ import atbash, atbash_slow' print(f"> atbash_slow(): {timeit('atbash_slow(printable)' , setup=_A )} seconds" ) print(f"> atbash(): {timeit('atbash(printable)' , setup=_A )} seconds" ) if __name__ == "__main__": for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"): print(f'{example} encrypted in atbash: {atbash(example)}') benchmark()
224
from functools import lru_cache @lru_cache def __UpperCamelCase ( _A ): if num < 0: raise ValueError('''Number should not be negative.''' ) return 1 if num in (0, 1) else num * factorial(num - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
278
0