Dataset schema (one record per code/style-context pair):

  field                    type     range
  code                     string   86 – 54.5k characters
  code_codestyle           int64    0 – 371
  style_context            string   87 – 49.2k characters
  style_context_codestyle  int64    0 – 349
  label                    int64    0 or 1
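In the rows below, `label` is 1 exactly when `code_codestyle` equals `style_context_codestyle`. A minimal sketch of how a dataset with this schema could be loaded with the `datasets` library — the path "example-org/python-codestyles" is a placeholder, not the real dataset identifier:

from datasets import load_dataset

# Hypothetical dataset path; substitute the actual identifier.
ds = load_dataset("example-org/python-codestyles", split="train")

for row in ds.select(range(3)):
    # label == 1 iff the two snippets share the same code style id.
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])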
code:

from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
code_codestyle: 178
style_context:

import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
style_context_codestyle: 178
label: 1
code:

import importlib.metadata
import operator
import re
import sys
from typing import Optional

from packaging import version


ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case: check the running interpreter version
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
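A short usage sketch of `require_version` with the requirement formats the parser above accepts (plain package names, single pins, comma-separated ranges, and the special-cased interpreter check); the package names are only illustrative, and the import path assumes the module lives at transformers.utils.versions as it does upstream:

from transformers.utils.versions import require_version

require_version("numpy")             # any installed version is fine
require_version("tqdm>=4.42.1")      # single constraint
require_version("numpy>=1.17,<2.0")  # comma-separated range
require_version("python>=3.8")       # checks sys.version_info, not pip metadata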
code_codestyle: 200
style_context:

from math import sqrt


def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
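As a quick sanity check (not part of the original solution), the classic amicable pair (220, 284) exercises `sum_of_divisors` in both directions: the proper divisors of 220 sum to 284, and those of 284 sum back to 220.

assert sum_of_divisors(220) == 284  # 1+2+4+5+10+11+20+22+44+55+110
assert sum_of_divisors(284) == 220  # 1+2+4+71+142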
style_context_codestyle: 200
label: 1
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class A__ ( unittest.TestCase): @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = TFAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = AutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = AutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : List[str] = 
TFAutoModelForCausalLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : int = TFAutoModelForMaskedLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : str = AutoModelForMaskedLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : Dict = AutoModelForSeqaSeqLM.from_pretrained( _SCREAMING_SNAKE_CASE , 
output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) __lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) def __lowerCamelCase ( self ): __lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) __lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
code_codestyle: 86
style_context:

import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
style_context_codestyle: 345
label: 0
code:

import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent


if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]

    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
code_codestyle: 351
style_context:

import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])

        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
style_context_codestyle: 330
label: 0
code:

import requests

APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q="Chicago", appid=APPID):
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q="Kolkata, India", appid=APPID):
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat=55.68, lon=12.57, appid=APPID):
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
code_codestyle: 83
style_context:

import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
style_context_codestyle: 83
label: 1
code:

class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
code_codestyle: 365
style_context:

import re
import tempfile
from pathlib import Path

import pytest
import yaml

from datasets.utils.readme import ReadMe


# @pytest.fixture
# def example_yaml_structure():

example_yaml_structure = yaml.safe_load(
    """\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
  - name: "Dataset Card for X"  # First-level markdown heading
    allow_empty: false
    allow_empty_text: true
    subsections:
      - name: "Table of Contents"
        allow_empty: false
        allow_empty_text: false
        subsections: null
      - name: "Dataset Description"
        allow_empty: false
        allow_empty_text: false
        subsections:
          - name: "Dataset Summary"
            allow_empty: false
            allow_empty_text: false
            subsections: null
          - name: "Supported Tasks and Leaderboards"
            allow_empty: true
            allow_empty_text: true
            subsections: null
          - name: Languages
            allow_empty: false
            allow_empty_text: true
            subsections: null
"""
)


CORRECT_DICT = {
    "name": "root",
    "text": "",
    "is_empty_text": True,
    "subsections": [
        {
            "name": "Dataset Card for My Dataset",
            "text": "",
            "is_empty_text": True,
            "subsections": [
                {"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                {
                    "name": "Dataset Description",
                    "text": "Some text here.",
                    "is_empty_text": False,
                    "subsections": [
                        {
                            "name": "Dataset Summary",
                            "text": "Some text here.",
                            "is_empty_text": False,
                            "subsections": [],
                        },
                        {
                            "name": "Supported Tasks and Leaderboards",
                            "text": "",
                            "is_empty_text": True,
                            "subsections": [],
                        },
                        {"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
                    ],
                },
            ],
        }
    ],
}


README_CORRECT = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

README_CORRECT_FOUR_LEVEL = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

CORRECT_DICT_FOUR_LEVEL = {
    "name": "root",
    "text": "",
    "is_empty_text": True,
    "subsections": [
        {
            "name": "Dataset Card for My Dataset",
            "text": "",
            "is_empty_text": True,
            "subsections": [
                {"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                {
                    "name": "Dataset Description",
                    "text": "Some text here.",
                    "is_empty_text": False,
                    "subsections": [
                        {
                            "name": "Dataset Summary",
                            "text": "Some text here.",
                            "is_empty_text": False,
                            "subsections": [
                                {
                                    "name": "Extra Ignored Subsection",
                                    "text": "",
                                    "is_empty_text": True,
                                    "subsections": [],
                                }
                            ],
                        },
                        {
                            "name": "Supported Tasks and Leaderboards",
                            "text": "",
                            "is_empty_text": True,
                            "subsections": [],
                        },
                        {"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
                    ],
                },
            ],
        }
    ],
}

README_EMPTY_YAML = """\
---
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_EMPTY_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)

README_NO_YAML = """\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_NO_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)

README_INCORRECT_YAML = """\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_INCORRECT_YAML = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."

README_MISSING_TEXT = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_MISSING_TEXT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."

README_NONE_SUBSECTION = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
"""

EXPECTED_ERROR_README_NONE_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."

README_MISSING_SUBSECTION = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
"""

EXPECTED_ERROR_README_MISSING_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."

README_MISSING_CONTENT = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
"""

EXPECTED_ERROR_README_MISSING_CONTENT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."

README_MISSING_FIRST_LEVEL = """\
---
language:
- zh
- en
---

## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."

README_MULTIPLE_WRONG_FIRST_LEVEL = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
"""

EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."

README_WRONG_FIRST_LEVEL = """\
---
language:
- zh
- en
---

# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."

README_EMPTY = ""

EXPECTED_ERROR_README_EMPTY = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."

README_MULTIPLE_SAME_HEADING_1 = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."


@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)


@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
style_context_codestyle: 15
label: 0
"""simple docstring""" import copy import os import cva import numpy as np from matplotlib import pyplot as plt class A_ : """simple docstring""" def __init__( self :List[Any] ): """simple docstring""" lowerCamelCase__ : str ='' lowerCamelCase__ : Tuple ='' lowerCamelCase__ : List[Any] =[] lowerCamelCase__ : Dict =0 lowerCamelCase__ : Tuple =256 lowerCamelCase__ : str =0 lowerCamelCase__ : Optional[Any] =0 lowerCamelCase__ : Optional[Any] =0 lowerCamelCase__ : Dict =0 def UpperCAmelCase__ ( self :Optional[int] , lowerCamelCase_ :Dict ): """simple docstring""" lowerCamelCase__ : Optional[Any] =cva.imread(lowerCamelCase_ , 0 ) lowerCamelCase__ : List[Any] =copy.deepcopy(self.img ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] =plt.hist(self.img.ravel() , 256 , [0, 256] , label='x' ) lowerCamelCase__ : List[str] =np.sum(lowerCamelCase_ ) for i in range(len(lowerCamelCase_ ) ): lowerCamelCase__ : Tuple =x[i] / self.k self.sk += prk lowerCamelCase__ : Tuple =(self.L - 1) * self.sk if self.rem != 0: lowerCamelCase__ : Dict =int(last % last ) lowerCamelCase__ : str =int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(lowerCamelCase_ ) lowerCamelCase__ : Optional[int] =int(np.ma.count(self.img ) / self.img[1].size ) lowerCamelCase__ : List[str] =self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): lowerCamelCase__ : str =self.img[j][i] if num != self.last_list[num]: lowerCamelCase__ : Optional[int] =self.last_list[num] cva.imwrite('output_data/output.jpg' , self.img ) def UpperCAmelCase__ ( self :Optional[int] ): """simple docstring""" plt.hist(self.img.ravel() , 256 , [0, 256] ) def UpperCAmelCase__ ( self :List[str] ): """simple docstring""" cva.imshow('Output-Image' , self.img ) cva.imshow('Input-Image' , self.original_image ) cva.waitKey(5_000 ) cva.destroyAllWindows() if __name__ == "__main__": lowerCAmelCase = os.path.join(os.path.basename(__file__), """image_data/input.jpg""") lowerCAmelCase = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
code_codestyle: 126
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase = logging.get_logger(__name__) class A_ ( A__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ = ["""audio_values""", """audio_mask"""] def __init__( self :List[str] , lowerCamelCase_ :List[str]=2_048 , lowerCamelCase_ :Dict=1 , lowerCamelCase_ :int=[16, 16] , lowerCamelCase_ :str=128 , lowerCamelCase_ :Union[str, Any]=44_100 , lowerCamelCase_ :Optional[Any]=86 , lowerCamelCase_ :Dict=2_048 , lowerCamelCase_ :Union[str, Any]=0.0 , **lowerCamelCase_ :Tuple , ): """simple docstring""" super().__init__( feature_size=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , padding_value=lowerCamelCase_ , **lowerCamelCase_ , ) lowerCamelCase__ : List[str] =spectrogram_length lowerCamelCase__ : Dict =num_channels lowerCamelCase__ : List[Any] =patch_size lowerCamelCase__ : Union[str, Any] =feature_size // self.patch_size[1] lowerCamelCase__ : int =n_fft lowerCamelCase__ : List[str] =sampling_rate // hop_length_to_sampling_rate lowerCamelCase__ : str =sampling_rate lowerCamelCase__ : int =padding_value lowerCamelCase__ : Dict =mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCamelCase_ , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=lowerCamelCase_ , norm='slaney' , mel_scale='slaney' , ).T def UpperCAmelCase__ ( self :Dict , lowerCamelCase_ :np.array ): """simple docstring""" lowerCamelCase__ : List[Any] =spectrogram( lowerCamelCase_ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , ) lowerCamelCase__ : Any =log_spec[:, :-1] lowerCamelCase__ : Tuple =log_spec - 20.0 lowerCamelCase__ : List[str] =np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self :Optional[Any] , lowerCamelCase_ :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase_ :Optional[Union[str, TensorType]] = None , lowerCamelCase_ :Optional[bool] = True , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :bool = False , **lowerCamelCase_ :Tuple , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( 'This feature extractor is set to support sampling rate' f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" f""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) lowerCamelCase__ : Dict =isinstance(lowerCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) lowerCamelCase__ : Union[str, Any] =is_batched_numpy or ( isinstance(lowerCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCamelCase__ : Optional[Any] =[np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(lowerCamelCase_ , np.ndarray ): lowerCamelCase__ : Optional[Any] =np.asarray(lowerCamelCase_ , dtype=np.floataa ) elif isinstance(lowerCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCamelCase__ : Union[str, Any] =raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCamelCase__ : List[str] =[np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis lowerCamelCase__ : Any =[ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , lowerCamelCase_ ): lowerCamelCase__ : Dict =[np.asarray(lowerCamelCase_ , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask lowerCamelCase__ : Optional[Any] =max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: lowerCamelCase__ : Any =[ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] lowerCamelCase__ : Union[str, Any] =np.array(lowerCamelCase_ ).astype(np.floataa ) # convert into correct format for padding lowerCamelCase__ : Tuple =max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch lowerCamelCase__ : str =np.ones([len(lowerCamelCase_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) lowerCamelCase__ : Dict =padded_audio_features * self.padding_value for i in range(len(lowerCamelCase_ ) ): lowerCamelCase__ : Union[str, Any] =audio_features[i] lowerCamelCase__ : Union[str, Any] =feature # return as BatchFeature if return_attention_mask: lowerCamelCase__ : int ={'audio_values': padded_audio_features, 'audio_mask': audio_mask} else: lowerCamelCase__ : Tuple ={'audio_values': padded_audio_features} lowerCamelCase__ : Union[str, Any] =BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_ ) return encoded_inputs
style_context_codestyle: 126
label: 1
code:

import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)


BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
code_codestyle: 353
"""simple docstring""" from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef import datasets UpperCAmelCase = """\ @inproceedings{wang2019glue, title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding}, author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.}, note={In the Proceedings of ICLR.}, year={2019} } """ UpperCAmelCase = """\ GLUE, the General Language Understanding Evaluation benchmark (https://gluebenchmark.com/) is a collection of resources for training, evaluating, and analyzing natural language understanding systems. """ UpperCAmelCase = """ Compute GLUE evaluation metric associated to each GLUE dataset. Args: predictions: list of predictions to score. Each translation should be tokenized into a list of tokens. references: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. Returns: depending on the GLUE subset, one or several of: \"accuracy\": Accuracy \"f1\": F1 score \"pearson\": Pearson Correlation \"spearmanr\": Spearman Correlation \"matthews_correlation\": Matthew Correlation Examples: >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"] >>> references = [0, 1] >>> predictions = [0, 1] >>> results = glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp' >>> references = [0, 1] >>> predictions = [0, 1] >>> results = glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0, 'f1': 1.0} >>> glue_metric = datasets.load_metric('glue', 'stsb') >>> references = [0., 1., 2., 3., 4., 5.] >>> predictions = [0., 1., 2., 3., 4., 5.] 
>>> results = glue_metric.compute(predictions=predictions, references=references) >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)}) {'pearson': 1.0, 'spearmanr': 1.0} >>> glue_metric = datasets.load_metric('glue', 'cola') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'matthews_correlation': 1.0} """ def lowercase ( a__ : int , a__ : Tuple ) -> Optional[Any]: return float((preds == labels).mean() ) def lowercase ( a__ : Optional[Any] , a__ : int ) -> Optional[int]: _UpperCamelCase = simple_accuracy(a__ , a__ ) _UpperCamelCase = float(fa_score(y_true=a__ , y_pred=a__ ) ) return { "accuracy": acc, "f1": fa, } def lowercase ( a__ : Any , a__ : Union[str, Any] ) -> Any: _UpperCamelCase = float(pearsonr(a__ , a__ )[0] ) _UpperCamelCase = float(spearmanr(a__ , a__ )[0] ) return { "pearson": pearson_corr, "spearmanr": spearman_corr, } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class UpperCAmelCase_ ( datasets.Metric): def _UpperCamelCase ( self : Optional[int] ) -> Optional[int]: if self.config_name not in [ "sst2", "mnli", "mnli_mismatched", "mnli_matched", "cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans", ]: raise KeyError( '''You should supply a configuration name selected in ''' '''["sst2", "mnli", "mnli_mismatched", "mnli_matched", ''' '''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ), '''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ), } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , ) def _UpperCamelCase ( self : int , __UpperCamelCase : int , __UpperCamelCase : List[Any] ) -> Any: if self.config_name == "cola": return {"matthews_correlation": matthews_corrcoef(__UpperCamelCase , __UpperCamelCase )} elif self.config_name == "stsb": return pearson_and_spearman(__UpperCamelCase , __UpperCamelCase ) elif self.config_name in ["mrpc", "qqp"]: return acc_and_fa(__UpperCamelCase , __UpperCamelCase ) elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]: return {"accuracy": simple_accuracy(__UpperCamelCase , __UpperCamelCase )} else: raise KeyError( '''You should supply a configuration name selected in ''' '''["sst2", "mnli", "mnli_mismatched", "mnli_matched", ''' '''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
style_context_codestyle: 54
label: 0
code (this last row is cut off mid-file in the source; the sample ends where the dump ends):

import argparse
import json
import os
import pickle
import shutil

import numpy as np
import torch

from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed


MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}


def sanity_checks(args):
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0


def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False


def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )

    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )

    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )

    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not precised whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not
predict special tokens SCREAMING_SNAKE_CASE = torch.from_numpy(A_ ) else: SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = LmSeqsDataset(params=A_ , data=A_ ) logger.info("""Data loader created.""" ) # STUDENT # logger.info(F"""Loading student config from {args.student_config}""" ) SCREAMING_SNAKE_CASE = student_config_class.from_pretrained(args.student_config ) SCREAMING_SNAKE_CASE = True if args.student_pretrained_weights is not None: logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" ) SCREAMING_SNAKE_CASE = student_model_class.from_pretrained(args.student_pretrained_weights , config=A_ ) else: SCREAMING_SNAKE_CASE = student_model_class(A_ ) if args.n_gpu > 0: student.to(F"""cuda:{args.local_rank}""" ) logger.info("""Student loaded.""" ) # TEACHER # SCREAMING_SNAKE_CASE = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=A_ ) if args.n_gpu > 0: teacher.to(F"""cuda:{args.local_rank}""" ) logger.info(F"""Teacher loaded from {args.teacher_name}.""" ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(A_ , A_ ) if args.freeze_token_type_embds: freeze_token_type_embeddings(A_ , A_ ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() SCREAMING_SNAKE_CASE = Distiller( params=A_ , dataset=A_ , token_probs=A_ , student=A_ , teacher=A_ ) distiller.train() logger.info("""Let\'s go get some drinks.""" ) if __name__ == "__main__": main()
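A minimal sketch of how the distillation script above could be launched from a shell, assuming it is saved as train.py. Every flag comes from the argparse definitions in the script itself; the file paths and checkpoint names are placeholders, and the alpha values are chosen to satisfy the script's own sanity_checks (MLM mode requires alpha_mlm > 0 and alpha_clm == 0):

python train.py \
    --student_type distilbert \
    --student_config student_config.json \
    --teacher_type bert \
    --teacher_name bert-base-uncased \
    --mlm --alpha_ce 0.5 --alpha_mlm 0.5 --alpha_clm 0.0 \
    --token_counts token_counts.pkl \
    --data_file binarized_data.pkl \
    --dump_path ./serialization_dir \
    --force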
296
"""simple docstring""" from collections import defaultdict from math import ceil, sqrt def __SCREAMING_SNAKE_CASE ( A_ = 1_00_00_00 , A_ = 10 ): lowerCAmelCase__ : defaultdict = defaultdict(A_ ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: lowerCAmelCase__ : int = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: lowerCAmelCase__ : Tuple = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(A_ , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 10 ) if __name__ == "__main__": print(F'''{solution() = }''')
106
0
import argparse
from copy import deepcopy

import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    set_seed,
)


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    """After each evaluation, also report metrics on the training set."""

    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy


def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()


if __name__ == "__main__":
    main()
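Since the script is driven entirely by get_args(), a typical invocation (assuming it is saved as train_complexity_predictor.py; the values shown are simply the argparse defaults made explicit) might be:

python train_complexity_predictor.py \
    --model_ckpt microsoft/unixcoder-base-nine \
    --num_epochs 5 \
    --batch_size 6 \
    --learning_rate 5e-4 \
    --output_dir ./results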
107
"""Project Euler Problem 21: sum of all amicable numbers under 10000."""
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """Sum every i below n that belongs to an amicable pair."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
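As a worked check, 220 and 284 form the classic amicable pair, so 220 satisfies exactly the condition solution tests (it maps to a different number that maps back):

assert sum_of_divisors(220) == 284  # proper divisors of 220 sum to 284
assert sum_of_divisors(284) == 220  # and vice versa
assert sum_of_divisors(sum_of_divisors(220)) == 220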
107
1
from collections import defaultdict


class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if the mask covers every person, we found one valid assignment
        if mask == self.final_mask:
            return 1
        # if not everyone gets a task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if this case was already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign the task one by one to all possible persons and recursively
        # assign the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
97
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
239
0
'''simple docstring''' import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model snake_case__ = """0.12""" # assumed parallelism: 8 if is_torch_available(): import torch def snake_case__ ( lowerCamelCase__ : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : str=None ) -> Union[str, Any]: if rng is None: A_ : List[Any] = random.Random() A_ : Any = 1 for dim in shape: total_dims *= dim A_ : Tuple = [] for _ in range(lowercase_ ): values.append(rng.randint(0 , vocab_size - 1 ) ) A_ : Optional[Any] = np.array(lowercase_ , dtype=jnp.intaa ).reshape(lowercase_ ) return output def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : Dict=None ) -> str: A_ : Dict = ids_tensor(lowercase_ , vocab_size=2 , rng=lowercase_ ) # make sure that at least one token is attended to for each batch A_ : str = 1 return attn_mask @require_flax class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = () def _a ( self : int ): """simple docstring""" A_ ,A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 A_ : Tuple = 2 A_ : str = inputs['''input_ids'''].shape[-1] // 2 A_ : str = inputs['''input_ids'''][:max_batch_size, :sequence_length] A_ : Optional[int] = jnp.ones_like(_lowerCamelCase ) A_ : Any = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens A_ : List[str] = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` A_ : Union[str, Any] = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def _a ( self : Optional[Any] ): """simple docstring""" A_ ,A_ ,A_ ,A_ : Any = self._get_input_ids_and_config() A_ : Union[str, Any] = False A_ : Any = max_length A_ : Optional[Any] = 0 for model_class in self.all_generative_model_classes: A_ : List[Any] = model_class(_lowerCamelCase ) A_ : Optional[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning A_ : str = getattr(_lowerCamelCase , _lowerCamelCase ) A_ : List[str] = pt_model_class(_lowerCamelCase ).eval() A_ : List[str] = load_flax_weights_in_pytorch_model(_lowerCamelCase , flax_model.params ) A_ : List[Any] = flax_model.generate(_lowerCamelCase ).sequences A_ : List[Any] = pt_model.generate(torch.tensor(_lowerCamelCase , dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: A_ : Any = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() ) def _a ( self : str ): """simple docstring""" A_ ,A_ ,A_ ,A_ : str = self._get_input_ids_and_config() A_ : Any = False A_ : Tuple = max_length for model_class in self.all_generative_model_classes: A_ : Any = model_class(_lowerCamelCase ) A_ : Union[str, Any] = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) A_ : Union[str, Any] = jit(model.generate ) A_ : Dict = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , 
jit_generation_outputs.tolist() ) def _a ( self : Tuple ): """simple docstring""" A_ ,A_ ,A_ ,A_ : Tuple = self._get_input_ids_and_config() A_ : List[Any] = True A_ : List[Any] = max_length for model_class in self.all_generative_model_classes: A_ : Any = model_class(_lowerCamelCase ) A_ : Optional[int] = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) A_ : Tuple = jit(model.generate ) A_ : List[str] = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ ,A_ ,A_ ,A_ : List[str] = self._get_input_ids_and_config() A_ : str = False A_ : Any = max_length A_ : Any = 2 for model_class in self.all_generative_model_classes: A_ : Dict = model_class(_lowerCamelCase ) A_ : Optional[int] = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) A_ : Optional[int] = jit(model.generate ) A_ : Dict = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : List[Any] ): """simple docstring""" A_ ,A_ ,A_ ,A_ : List[Any] = self._get_input_ids_and_config() A_ : Dict = False A_ : str = max_length A_ : str = 2 A_ : Optional[Any] = 2 for model_class in self.all_generative_model_classes: A_ : int = model_class(_lowerCamelCase ) A_ : Dict = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences ) def _a ( self : Optional[Any] ): """simple docstring""" A_ ,A_ ,A_ ,A_ : Union[str, Any] = self._get_input_ids_and_config() A_ : List[str] = True A_ : List[str] = max_length A_ : str = 0.8 A_ : Dict = 10 A_ : Union[str, Any] = 0.3 A_ : int = 1 A_ : Any = 8 A_ : Dict = 9 for model_class in self.all_generative_model_classes: A_ : Optional[int] = model_class(_lowerCamelCase ) A_ : List[Any] = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) A_ : Dict = jit(model.generate ) A_ : Optional[Any] = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : Optional[int] ): """simple docstring""" A_ ,A_ ,A_ ,A_ : Optional[Any] = self._get_input_ids_and_config() A_ : int = max_length A_ : str = 1 A_ : str = 8 A_ : Union[str, Any] = 9 for model_class in self.all_generative_model_classes: A_ : Optional[int] = model_class(_lowerCamelCase ) A_ : List[Any] = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) A_ : str = jit(model.generate ) A_ : List[Any] = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : List[str] ): """simple docstring""" A_ ,A_ ,A_ ,A_ : List[str] = self._get_input_ids_and_config() A_ : Optional[int] = max_length A_ : Optional[int] = 2 A_ : str = 1 A_ : Any = 8 A_ : str = 9 for model_class in self.all_generative_model_classes: A_ : List[Any] = model_class(_lowerCamelCase ) A_ : Tuple = model.generate(_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) A_ : List[Any] = jit(model.generate ) A_ : Optional[int] = jit_generate(_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : List[str] ): """simple 
docstring""" A_ ,A_ ,A_ ,A_ : Dict = self._get_input_ids_and_config() # pad attention mask on the left A_ : List[str] = attention_mask.at[(0, 0)].set(0 ) A_ : List[str] = False A_ : int = max_length for model_class in self.all_generative_model_classes: A_ : Tuple = model_class(_lowerCamelCase ) A_ : Tuple = model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) A_ : Any = jit(model.generate ) A_ : Union[str, Any] = jit_generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : List[str] ): """simple docstring""" A_ ,A_ ,A_ ,A_ : Optional[int] = self._get_input_ids_and_config() # pad attention mask on the left A_ : List[str] = attention_mask.at[(0, 0)].set(0 ) A_ : List[Any] = True A_ : Any = max_length for model_class in self.all_generative_model_classes: A_ : Dict = model_class(_lowerCamelCase ) A_ : int = model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) A_ : Optional[Any] = jit(model.generate ) A_ : Tuple = jit_generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _a ( self : str ): """simple docstring""" A_ ,A_ ,A_ ,A_ : List[str] = self._get_input_ids_and_config() # pad attention mask on the left A_ : Dict = attention_mask.at[(0, 0)].set(0 ) A_ : List[Any] = 2 A_ : str = max_length for model_class in self.all_generative_model_classes: A_ : Union[str, Any] = model_class(_lowerCamelCase ) A_ : Dict = model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase ) A_ : List[str] = jit(model.generate ) A_ : Optional[Any] = jit_generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) @require_flax class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' ) A_ : Tuple = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) A_ : Any = '''Hello world''' A_ : int = tokenizer(_lowerCamelCase , return_tensors='''np''' ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(_lowerCamelCase , '''do_samples''' ): model.generate(_lowerCamelCase , do_samples=_lowerCamelCase ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(_lowerCamelCase , '''foo''' ): A_ : Optional[int] = {'''foo''': '''bar'''} model.generate(_lowerCamelCase , **_lowerCamelCase )
361
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an "
        "image. It takes two arguments named `image` which should be the original image, and `label` which should "
        "be a text describing the elements that should be identified in the segmentation mask. The tool returns "
        "the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation
    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array < 0] = 0  # threshold the logits into a binary mask
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
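A usage sketch for the tool above. PipelineTool instances are callable (encode, forward, and decode run in sequence), so something like the following should produce a mask; the image path is a placeholder, and instantiating with no arguments falls back to the default checkpoint, which requires network access:

from PIL import Image

tool = ImageSegmentationTool()  # loads default_checkpoint lazily
mask = tool(image=Image.open("photo.png"), label="cat")
mask.save("cat_mask.png")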
4
0
"""simple docstring""" from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def lowerCamelCase ( _UpperCamelCase : Optional[Any] ) -> Optional[int]: '''simple docstring''' return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) UpperCAmelCase : Union[str, Any] = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n' class lowerCamelCase__ ( __A ): """simple docstring""" @staticmethod def lowerCamelCase__ ( UpperCamelCase : ArgumentParser ): '''simple docstring''' __UpperCAmelCase : Dict = parser.add_parser( """convert""" , help="""CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.""" , ) train_parser.add_argument("""--model_type""" , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="""Model's type.""" ) train_parser.add_argument( """--tf_checkpoint""" , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="""TensorFlow checkpoint path or folder.""" ) train_parser.add_argument( """--pytorch_dump_output""" , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="""Path to the PyTorch saved model output.""" ) train_parser.add_argument("""--config""" , type=lowerCAmelCase_ , default="""""" , help="""Configuration file path or folder.""" ) train_parser.add_argument( """--finetuning_task_name""" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , help="""Optional fine-tuning task name if the TF model was a finetuned model.""" , ) train_parser.set_defaults(func=lowerCAmelCase_ ) def __init__( self : Optional[int] , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str , *UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : List[Any] = logging.get_logger("""transformers-cli/converting""" ) self._logger.info(f'''Loading model {model_type}''' ) __UpperCAmelCase : Any = model_type __UpperCAmelCase : Dict = tf_checkpoint __UpperCAmelCase : Dict = pytorch_dump_output __UpperCAmelCase : str = config __UpperCAmelCase : Optional[int] = finetuning_task_name def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(lowerCAmelCase_ ) 
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase_ ) if "ckpt" in self._tf_checkpoint.lower(): __UpperCAmelCase : Dict = self._tf_checkpoint __UpperCAmelCase : Optional[int] = "" else: __UpperCAmelCase : str = self._tf_checkpoint __UpperCAmelCase : str = "" convert_transfo_xl_checkpoint_to_pytorch( lowerCAmelCase_ , self._config , self._pytorch_dump_output , lowerCAmelCase_ ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase_ ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase_ ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( """--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
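Once registered, the subcommand above is reached through the transformers CLI. A typical call (the checkpoint and config paths are placeholders) looks like:

transformers-cli convert --model_type bert \
    --tf_checkpoint ./bert_model.ckpt \
    --config ./bert_config.json \
    --pytorch_dump_output ./pytorch_model.bin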
115
"""simple docstring""" from __future__ import annotations class UpperCamelCase_ : def __init__( self : Any , lowerCAmelCase_ : int ) -> None: UpperCAmelCase_ : Any = data UpperCAmelCase_ : Node | None = None UpperCAmelCase_ : Node | None = None def snake_case ( A__ ): # In Order traversal of the tree if tree: display(tree.left ) print(tree.data ) display(tree.right ) def snake_case ( A__ ): return 1 + max(depth_of_tree(tree.left ) ,depth_of_tree(tree.right ) ) if tree else 0 def snake_case ( A__ ): if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def snake_case ( ): # Main function for testing. UpperCAmelCase_ : List[str] = Node(1 ) UpperCAmelCase_ : Any = Node(2 ) UpperCAmelCase_ : Optional[Any] = Node(3 ) UpperCAmelCase_ : Union[str, Any] = Node(4 ) UpperCAmelCase_ : int = Node(5 ) UpperCAmelCase_ : Optional[int] = Node(6 ) UpperCAmelCase_ : Any = Node(7 ) UpperCAmelCase_ : List[str] = Node(8 ) UpperCAmelCase_ : List[Any] = Node(9 ) print(is_full_binary_tree(A__ ) ) print(depth_of_tree(A__ ) ) print("Tree is: " ) display(A__ ) if __name__ == "__main__": main()
268
0
"""simple docstring""" import argparse import shutil from pathlib import Path from tqdm import tqdm from transformers import AutoTokenizer def __lowercase ( _a , _a , _a , _a=1_024 ): snake_case_ : str = [], [] snake_case_ : Union[str, Any] = list(zip(_a , _a ) ) snake_case_ : Tuple = sorted_examples[0] def is_too_big(_a ): return tok(_a , return_tensors='''pt''' ).input_ids.shape[1] > max_tokens for src, tgt in tqdm(sorted_examples[1:] ): snake_case_ : Optional[int] = new_src + ''' ''' + src snake_case_ : List[str] = new_tgt + ''' ''' + tgt if is_too_big(_a ) or is_too_big(_a ): # cant fit, finalize example finished_src.append(_a ) finished_tgt.append(_a ) snake_case_ : Optional[Any] = src, tgt else: # can fit, keep adding snake_case_ : Tuple = cand_src, cand_tgt # cleanup if new_src: assert new_tgt finished_src.append(_a ) finished_tgt.append(_a ) return finished_src, finished_tgt def __lowercase ( _a , _a , _a , _a ): snake_case_ : Any = Path(_a ) save_path.mkdir(exist_ok=_a ) for split in ["train"]: snake_case_ : str = data_dir / f"{split}.source", data_dir / f"{split}.target" snake_case_ : int = [x.rstrip() for x in Path(_a ).open().readlines()] snake_case_ : Optional[Any] = [x.rstrip() for x in Path(_a ).open().readlines()] snake_case_ : List[str] = pack_examples(_a , _a , _a , _a ) print(f"packed {split} split from {len(_a )} examples -> {len(_a )}." ) Path(save_path / f"{split}.source" ).open('''w''' ).write('''\n'''.join(_a ) ) Path(save_path / f"{split}.target" ).open('''w''' ).write('''\n'''.join(_a ) ) for split in ["val", "test"]: snake_case_ : Optional[int] = data_dir / f"{split}.source", data_dir / f"{split}.target" shutil.copyfile(_a , save_path / f"{split}.source" ) shutil.copyfile(_a , save_path / f"{split}.target" ) def __lowercase ( ): snake_case_ : List[str] = argparse.ArgumentParser() parser.add_argument('''--tok_name''' , type=_a , help='''like facebook/bart-large-cnn,t5-base, etc.''' ) parser.add_argument('''--max_seq_len''' , type=_a , default=128 ) parser.add_argument('''--data_dir''' , type=_a ) parser.add_argument('''--save_path''' , type=_a ) snake_case_ : Tuple = parser.parse_args() snake_case_ : Union[str, Any] = AutoTokenizer.from_pretrained(args.tok_name ) return pack_data_dir(_a , Path(args.data_dir ) , args.max_seq_len , args.save_path ) if __name__ == "__main__": packer_cli()
356
"""simple docstring""" import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _UpperCAmelCase ( unittest.TestCase): def _snake_case ( self : List[str] ): # clean up the VRAM after each test super().tearDown() gc.collect() def _snake_case ( self : List[str] ): snake_case_, snake_case_ : List[str] = FlaxStableDiffusionPipeline.from_pretrained( '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , ) snake_case_ : Union[str, Any] = '''A painting of a squirrel eating a burger''' snake_case_ : Tuple = jax.device_count() snake_case_ : Dict = num_samples * [prompt] snake_case_ : Tuple = sd_pipe.prepare_inputs(lowercase_ ) snake_case_ : str = replicate(lowercase_ ) snake_case_ : Any = shard(lowercase_ ) snake_case_ : Optional[int] = jax.random.PRNGKey(0 ) snake_case_ : Union[str, Any] = jax.random.split(lowercase_ , jax.device_count() ) snake_case_ : Optional[Any] = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) snake_case_ : str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) snake_case_ : str = images[0, 253:256, 253:256, -1] snake_case_ : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case_ : int = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] ) print(f"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def _snake_case ( self : str ): snake_case_ : Optional[Any] = '''stabilityai/stable-diffusion-2''' snake_case_, snake_case_ : Union[str, Any] = FlaxDPMSolverMultistepScheduler.from_pretrained(lowercase_ , subfolder='''scheduler''' ) snake_case_, snake_case_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained( lowercase_ , scheduler=lowercase_ , revision='''bf16''' , dtype=jnp.bfloataa , ) snake_case_ : List[Any] = scheduler_params snake_case_ : int = '''A painting of a squirrel eating a burger''' snake_case_ : str = jax.device_count() snake_case_ : Union[str, Any] = num_samples * [prompt] snake_case_ : int = sd_pipe.prepare_inputs(lowercase_ ) snake_case_ : List[str] = replicate(lowercase_ ) snake_case_ : List[Any] = shard(lowercase_ ) snake_case_ : int = jax.random.PRNGKey(0 ) snake_case_ : Tuple = jax.random.split(lowercase_ , jax.device_count() ) snake_case_ : int = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) snake_case_ : Optional[int] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) snake_case_ : List[str] = images[0, 253:256, 253:256, -1] snake_case_ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case_ : Optional[int] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] ) print(f"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
155
0
import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def lowerCAmelCase__ ( lowerCamelCase_ : Any): '''simple docstring''' if "img_encoder.pos_embed" in name: lowerCAmelCase__ : Dict = name.replace('''img_encoder.pos_embed''' ,'''vision_model.embeddings.position_embeddings''') if "img_encoder.patch_embed.proj" in name: lowerCAmelCase__ : int = name.replace('''img_encoder.patch_embed.proj''' ,'''vision_model.embeddings.patch_embeddings.projection''') if "img_encoder.patch_embed.norm" in name: lowerCAmelCase__ : Optional[int] = name.replace('''img_encoder.patch_embed.norm''' ,'''vision_model.embeddings.layernorm''') if "img_encoder.layers" in name: lowerCAmelCase__ : Tuple = name.replace('''img_encoder.layers''' ,'''vision_model.encoder.stages''') if "blocks" in name and "res" not in name: lowerCAmelCase__ : Dict = name.replace('''blocks''' ,'''layers''') if "attn" in name and "pre_assign" not in name: lowerCAmelCase__ : Optional[int] = name.replace('''attn''' ,'''self_attn''') if "proj" in name and "self_attn" in name and "text" not in name: lowerCAmelCase__ : Union[str, Any] = name.replace('''proj''' ,'''out_proj''') if "pre_assign_attn.attn.proj" in name: lowerCAmelCase__ : List[Any] = name.replace('''pre_assign_attn.attn.proj''' ,'''pre_assign_attn.attn.out_proj''') if "norm1" in name: lowerCAmelCase__ : Union[str, Any] = name.replace('''norm1''' ,'''layer_norm1''') if "norm2" in name and "pre_assign" not in name: lowerCAmelCase__ : int = name.replace('''norm2''' ,'''layer_norm2''') if "img_encoder.norm" in name: lowerCAmelCase__ : List[Any] = name.replace('''img_encoder.norm''' ,'''vision_model.layernorm''') # text encoder if "text_encoder.token_embedding" in name: lowerCAmelCase__ : List[Any] = name.replace('''text_encoder.token_embedding''' ,'''text_model.embeddings.token_embedding''') if "text_encoder.positional_embedding" in name: lowerCAmelCase__ : Tuple = name.replace('''text_encoder.positional_embedding''' ,'''text_model.embeddings.position_embedding.weight''') if "text_encoder.transformer.resblocks." in name: lowerCAmelCase__ : Union[str, Any] = name.replace('''text_encoder.transformer.resblocks.''' ,'''text_model.encoder.layers.''') if "ln_1" in name: lowerCAmelCase__ : Union[str, Any] = name.replace('''ln_1''' ,'''layer_norm1''') if "ln_2" in name: lowerCAmelCase__ : Union[str, Any] = name.replace('''ln_2''' ,'''layer_norm2''') if "c_fc" in name: lowerCAmelCase__ : Optional[Any] = name.replace('''c_fc''' ,'''fc1''') if "c_proj" in name: lowerCAmelCase__ : List[str] = name.replace('''c_proj''' ,'''fc2''') if "text_encoder" in name: lowerCAmelCase__ : str = name.replace('''text_encoder''' ,'''text_model''') if "ln_final" in name: lowerCAmelCase__ : Union[str, Any] = name.replace('''ln_final''' ,'''final_layer_norm''') # projection layers if "img_projector.linear_hidden." in name: lowerCAmelCase__ : Tuple = name.replace('''img_projector.linear_hidden.''' ,'''visual_projection.''') if "img_projector.linear_out." 
in name: lowerCAmelCase__ : Optional[Any] = name.replace('''img_projector.linear_out.''' ,'''visual_projection.3.''') if "text_projector.linear_hidden" in name: lowerCAmelCase__ : Tuple = name.replace('''text_projector.linear_hidden''' ,'''text_projection''') if "text_projector.linear_out" in name: lowerCAmelCase__ : Dict = name.replace('''text_projector.linear_out''' ,'''text_projection.3''') return name def lowerCAmelCase__ ( lowerCamelCase_ : Optional[Any] ,lowerCamelCase_ : List[str]): '''simple docstring''' for key in orig_state_dict.copy().keys(): lowerCAmelCase__ : List[str] = orig_state_dict.pop(lowerCamelCase_) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors lowerCAmelCase__ : Tuple = key.split('''.''') lowerCAmelCase__ , lowerCAmelCase__ : List[str] = int(key_split[2]), int(key_split[4]) lowerCAmelCase__ : Any = config.vision_config.hidden_size if "weight" in key: lowerCAmelCase__ : Tuple = val[:dim, :] lowerCAmelCase__ : Dict = val[dim : dim * 2, :] lowerCAmelCase__ : List[str] = val[-dim:, :] else: lowerCAmelCase__ : List[Any] = val[:dim] lowerCAmelCase__ : List[str] = val[dim : dim * 2] lowerCAmelCase__ : Tuple = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors lowerCAmelCase__ : Dict = key.split('''.''') lowerCAmelCase__ : List[str] = int(key_split[3]) lowerCAmelCase__ : Any = config.text_config.hidden_size if "weight" in key: lowerCAmelCase__ : Tuple = val[:dim, :] lowerCAmelCase__ : Union[str, Any] = val[ dim : dim * 2, : ] lowerCAmelCase__ : List[Any] = val[-dim:, :] else: lowerCAmelCase__ : Union[str, Any] = val[:dim] lowerCAmelCase__ : List[str] = val[dim : dim * 2] lowerCAmelCase__ : str = val[-dim:] else: lowerCAmelCase__ : int = rename_key(lowerCamelCase_) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): lowerCAmelCase__ : Dict = val.squeeze_() else: lowerCAmelCase__ : Tuple = val return orig_state_dict def lowerCAmelCase__ ( ): '''simple docstring''' lowerCAmelCase__ : Dict = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase__ : str = Image.open(requests.get(lowerCamelCase_ ,stream=lowerCamelCase_).raw) return im @torch.no_grad() def lowerCAmelCase__ ( lowerCamelCase_ : str ,lowerCamelCase_ : int ,lowerCamelCase_ : Tuple="groupvit-gcc-yfcc" ,lowerCamelCase_ : int=False): '''simple docstring''' lowerCAmelCase__ : Dict = GroupViTConfig() lowerCAmelCase__ : Dict = GroupViTModel(lowerCamelCase_).eval() lowerCAmelCase__ : Optional[int] = torch.load(lowerCamelCase_ ,map_location='''cpu''')['''model'''] lowerCAmelCase__ : List[Any] = convert_state_dict(lowerCamelCase_ ,lowerCamelCase_) lowerCAmelCase__ , lowerCAmelCase__ : Any = model.load_state_dict(lowerCamelCase_ ,strict=lowerCamelCase_) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCamelCase_) == 0) # verify result lowerCAmelCase__ : Optional[Any] = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''') lowerCAmelCase__ : Tuple = prepare_img() lowerCAmelCase__ : Dict = processor(text=['''a photo of a cat''', '''a photo of a dog'''] ,images=lowerCamelCase_ 
,padding=lowerCamelCase_ ,return_tensors='''pt''') with torch.no_grad(): lowerCAmelCase__ : str = model(**lowerCamelCase_) if model_name == "groupvit-gcc-yfcc": lowerCAmelCase__ : Union[str, Any] = torch.tensor([[13.3523, 6.3629]]) elif model_name == "groupvit-gcc-redcaps": lowerCAmelCase__ : Tuple = torch.tensor([[16.1873, 8.6230]]) else: raise ValueError(f"""Model name {model_name} not supported.""") assert torch.allclose(outputs.logits_per_image ,lowerCamelCase_ ,atol=1E-3) processor.save_pretrained(lowerCamelCase_) model.save_pretrained(lowerCamelCase_) print('''Successfully saved processor and model to''' ,lowerCamelCase_) if push_to_hub: print('''Pushing to the hub...''') processor.push_to_hub(lowerCamelCase_ ,organization='''nielsr''') model.push_to_hub(lowerCamelCase_ ,organization='''nielsr''') if __name__ == "__main__": __snake_case : int =argparse.ArgumentParser() parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.' ) parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint') parser.add_argument( '--model_name', default='groupvit-gccy-fcc', type=str, help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.', ) __snake_case : Tuple =parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
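A typical invocation of the conversion script above (assuming it is saved as convert_groupvit_checkpoint.py; the checkpoint path is a placeholder). Note that the argparse default model name, 'groupvit-gccy-fcc', misspells the only two names the conversion function accepts, so --model_name should always be passed explicitly:

python convert_groupvit_checkpoint.py \
    --checkpoint_path ./groupvit_checkpoint.pth \
    --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
    --model_name groupvit-gcc-yfcc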
129
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCamelCase__ ( lowerCamelCase__): '''simple docstring''' snake_case_ =["""image_processor""", """tokenizer"""] snake_case_ ="""Pix2StructImageProcessor""" snake_case_ =("""T5Tokenizer""", """T5TokenizerFast""") def __init__(self ,__lowerCamelCase ,__lowerCamelCase ) -> List[str]: """simple docstring""" lowerCAmelCase__ : str = False super().__init__(__lowerCamelCase ,__lowerCamelCase ) def __call__(self ,__lowerCamelCase=None ,__lowerCamelCase = None ,__lowerCamelCase = True ,__lowerCamelCase = False ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = 20_48 ,__lowerCamelCase = 0 ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = False ,__lowerCamelCase = False ,__lowerCamelCase = False ,__lowerCamelCase = False ,__lowerCamelCase = False ,__lowerCamelCase = True ,__lowerCamelCase = None ,**__lowerCamelCase ,) -> BatchEncoding: """simple docstring""" if images is None and text is None: raise ValueError('''You have to specify either images or text.''' ) # Get only text if images is None and not self.image_processor.is_vqa: lowerCAmelCase__ : List[str] = self.tokenizer lowerCAmelCase__ : List[str] = self.tokenizer( text=__lowerCamelCase ,add_special_tokens=__lowerCamelCase ,padding=__lowerCamelCase ,truncation=__lowerCamelCase ,max_length=__lowerCamelCase ,stride=__lowerCamelCase ,pad_to_multiple_of=__lowerCamelCase ,return_attention_mask=__lowerCamelCase ,return_overflowing_tokens=__lowerCamelCase ,return_special_tokens_mask=__lowerCamelCase ,return_offsets_mapping=__lowerCamelCase ,return_token_type_ids=__lowerCamelCase ,return_length=__lowerCamelCase ,verbose=__lowerCamelCase ,return_tensors=__lowerCamelCase ,**__lowerCamelCase ,) return text_encoding if not self.image_processor.is_vqa: # add pixel_values lowerCAmelCase__ : int = self.image_processor( __lowerCamelCase ,return_tensors=__lowerCamelCase ,max_patches=__lowerCamelCase ,**__lowerCamelCase ) else: # add pixel_values and bbox lowerCAmelCase__ : List[str] = self.image_processor( __lowerCamelCase ,return_tensors=__lowerCamelCase ,max_patches=__lowerCamelCase ,header_text=__lowerCamelCase ,**__lowerCamelCase ) if text is not None and not self.image_processor.is_vqa: lowerCAmelCase__ : List[str] = self.tokenizer( text=__lowerCamelCase ,add_special_tokens=__lowerCamelCase ,padding=__lowerCamelCase ,truncation=__lowerCamelCase ,max_length=__lowerCamelCase ,stride=__lowerCamelCase ,pad_to_multiple_of=__lowerCamelCase ,return_attention_mask=__lowerCamelCase ,return_overflowing_tokens=__lowerCamelCase ,return_special_tokens_mask=__lowerCamelCase ,return_offsets_mapping=__lowerCamelCase ,return_token_type_ids=__lowerCamelCase ,return_length=__lowerCamelCase ,verbose=__lowerCamelCase ,return_tensors=__lowerCamelCase ,**__lowerCamelCase ,) if "attention_mask" in text_encoding: lowerCAmelCase__ : List[str] = text_encoding.pop('''attention_mask''' ) if "input_ids" in text_encoding: lowerCAmelCase__ : Dict = text_encoding.pop('''input_ids''' ) else: lowerCAmelCase__ : int = None if text_encoding is not None: encoding_image_processor.update(__lowerCamelCase ) return encoding_image_processor def lowerCAmelCase__ (self ,*__lowerCamelCase ,**__lowerCamelCase ) -> Optional[Any]: """simple docstring""" return self.tokenizer.batch_decode(*__lowerCamelCase 
,**__lowerCamelCase ) def lowerCAmelCase__ (self ,*__lowerCamelCase ,**__lowerCamelCase ) -> str: """simple docstring""" return self.tokenizer.decode(*__lowerCamelCase ,**__lowerCamelCase ) @property def lowerCAmelCase__ (self ) -> Any: """simple docstring""" lowerCAmelCase__ : Dict = self.tokenizer.model_input_names lowerCAmelCase__ : Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
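A brief usage sketch for the processor above (the checkpoint id is one plausible choice, and the image path is a placeholder):

from PIL import Image
from transformers import Pix2StructProcessor

processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
inputs = processor(images=Image.open("chart.png"), text="A picture of", return_tensors="pt")
print(inputs.keys())  # flattened patches and attention mask, plus decoder inputs for non-VQA checkpoints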
129
1
'''simple docstring''' import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def _lowercase ( __A ): '''simple docstring''' __UpperCamelCase = int(__A ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = t // 3_600, (t // 60) % 60, t % 60 return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}" def _lowercase ( __A ,__A ,__A ,__A ,__A=300 ): '''simple docstring''' return f"\n <div>\n {prefix}\n <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>\n {label}\n </div>\n " def _lowercase ( __A ): '''simple docstring''' __UpperCamelCase = """<table border=\"1\" class=\"dataframe\">\n""" html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += f" <th>{i}</th>\n" html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: __UpperCamelCase = f"{elt:.6f}" if isinstance(__A ,__A ) else str(__A ) html_code += f" <td>{elt}</td>\n" html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class UpperCAmelCase__ : __SCREAMING_SNAKE_CASE = 5 __SCREAMING_SNAKE_CASE = 0.2 def __init__( self , lowercase , lowercase = None , lowercase = True , lowercase = None , lowercase = 3_0_0 , ) -> str: __UpperCamelCase = total __UpperCamelCase = """""" if prefix is None else prefix __UpperCamelCase = leave __UpperCamelCase = parent __UpperCamelCase = width __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None def __lowerCamelCase ( self , lowercase , lowercase = False , lowercase = None ) -> int: __UpperCamelCase = value if comment is not None: __UpperCamelCase = comment if self.last_value is None: __UpperCamelCase = __UpperCamelCase = time.time() __UpperCamelCase = __UpperCamelCase = value __UpperCamelCase = __UpperCamelCase = None __UpperCamelCase = self.warmup __UpperCamelCase = 1 self.update_bar(lowercase ) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ): if self.first_calls > 0: self.first_calls -= 1 __UpperCamelCase = time.time() __UpperCamelCase = current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. 
if value > self.start_value: __UpperCamelCase = self.elapsed_time / (value - self.start_value) else: __UpperCamelCase = None if value >= self.total: __UpperCamelCase = self.total __UpperCamelCase = None if not self.leave: self.close() elif self.average_time_per_item is not None: __UpperCamelCase = self.average_time_per_item * (self.total - value) self.update_bar(lowercase ) __UpperCamelCase = value __UpperCamelCase = current_time if self.average_time_per_item is None: __UpperCamelCase = 1 else: __UpperCamelCase = max(int(self.update_every / self.average_time_per_item ) , 1 ) def __lowerCamelCase ( self , lowercase , lowercase=None ) -> List[str]: __UpperCamelCase = """ """ * (len(str(self.total ) ) - len(str(lowercase ) )) + str(lowercase ) if self.elapsed_time is None: __UpperCamelCase = f"[{spaced_value}/{self.total} : < :" elif self.predicted_remaining is None: __UpperCamelCase = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time )}" else: __UpperCamelCase = ( f"[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <" f" {format_time(self.predicted_remaining )}" ) self.label += f", {1/self.average_time_per_item:.2f} it/s" self.label += "]" if self.comment is None or len(self.comment ) == 0 else f", {self.comment}]" self.display() def __lowerCamelCase ( self ) -> List[str]: __UpperCamelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: __UpperCamelCase = disp.display(disp.HTML(self.html_code ) , display_id=lowercase ) else: self.output.update(disp.HTML(self.html_code ) ) def __lowerCamelCase ( self ) -> Any: if self.parent is None and self.output is not None: self.output.update(disp.HTML("""""" ) ) class UpperCAmelCase__ ( UpperCAmelCase_): def __init__( self , lowercase , lowercase=None ) -> List[Any]: super().__init__(lowercase ) __UpperCamelCase = None if column_names is None else [column_names] __UpperCamelCase = None def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: __UpperCamelCase = disp.display(disp.HTML(self.html_code ) , display_id=lowercase ) else: self.output.update(disp.HTML(self.html_code ) ) def __lowerCamelCase ( self , lowercase ) -> Dict: if self.inner_table is None: __UpperCamelCase = [list(values.keys() ), list(values.values() )] else: __UpperCamelCase = self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(lowercase ) __UpperCamelCase = columns self.inner_table.append([values[c] for c in columns] ) def __lowerCamelCase ( self , lowercase , lowercase=None , lowercase=3_0_0 ) -> Tuple: __UpperCamelCase = NotebookProgressBar(lowercase , prefix=lowercase , parent=self , width=lowercase ) return self.child_bar def __lowerCamelCase ( self ) -> Optional[Any]: __UpperCamelCase = None self.display() class UpperCAmelCase__ ( UpperCAmelCase_): def __init__( self ) -> str: __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = False def __lowerCamelCase ( self , lowercase , lowercase , lowercase , **lowercase ) -> int: __UpperCamelCase = """Epoch""" 
if args.evaluation_strategy == IntervalStrategy.EPOCH else """Step""" __UpperCamelCase = 0 __UpperCamelCase = 0 __UpperCamelCase = [self.first_column] + ["""Training Loss"""] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append("""Validation Loss""" ) __UpperCamelCase = NotebookTrainingTracker(state.max_steps , lowercase ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , **lowercase ) -> Union[str, Any]: __UpperCamelCase = int(state.epoch ) if int(state.epoch ) == state.epoch else f"{state.epoch:.2f}" self.training_tracker.update( state.global_step + 1 , comment=f"Epoch {epoch}/{state.num_train_epochs}" , force_update=self._force_next_update , ) __UpperCamelCase = False def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase=None , **lowercase ) -> Optional[int]: if not has_length(lowercase ): return if self.prediction_bar is None: if self.training_tracker is not None: __UpperCamelCase = self.training_tracker.add_child(len(lowercase ) ) else: __UpperCamelCase = NotebookProgressBar(len(lowercase ) ) self.prediction_bar.update(1 ) else: self.prediction_bar.update(self.prediction_bar.value + 1 ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , **lowercase ) -> List[Any]: if self.prediction_bar is not None: self.prediction_bar.close() __UpperCamelCase = None def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase=None , **lowercase ) -> Optional[int]: # Only for when there is no evaluation if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: __UpperCamelCase = {"""Training Loss""": logs["""loss"""]} # First column is necessarily Step since we're not in epoch eval strategy __UpperCamelCase = state.global_step self.training_tracker.write_line(lowercase ) def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase=None , **lowercase ) -> Optional[Any]: if self.training_tracker is not None: __UpperCamelCase = {"""Training Loss""": """No log""", """Validation Loss""": """No log"""} for log in reversed(state.log_history ): if "loss" in log: __UpperCamelCase = log["""loss"""] break if self.first_column == "Epoch": __UpperCamelCase = int(state.epoch ) else: __UpperCamelCase = state.global_step __UpperCamelCase = """eval""" for k in metrics: if k.endswith("""_loss""" ): __UpperCamelCase = re.sub(r"""\_loss$""" , """""" , lowercase ) __UpperCamelCase = metrics.pop("""total_flos""" , lowercase ) __UpperCamelCase = metrics.pop("""epoch""" , lowercase ) __UpperCamelCase = metrics.pop(f"{metric_key_prefix}_runtime" , lowercase ) __UpperCamelCase = metrics.pop(f"{metric_key_prefix}_samples_per_second" , lowercase ) __UpperCamelCase = metrics.pop(f"{metric_key_prefix}_steps_per_second" , lowercase ) __UpperCamelCase = metrics.pop(f"{metric_key_prefix}_jit_compilation_time" , lowercase ) for k, v in metrics.items(): if k == f"{metric_key_prefix}_loss": __UpperCamelCase = v else: __UpperCamelCase = k.split("""_""" ) __UpperCamelCase = """ """.join([part.capitalize() for part in splits[1:]] ) __UpperCamelCase = v self.training_tracker.write_line(lowercase ) self.training_tracker.remove_child() __UpperCamelCase = None # Evaluation takes a long time so we should force the next update. __UpperCamelCase = True def __lowerCamelCase ( self , lowercase , lowercase , lowercase , **lowercase ) -> List[str]: self.training_tracker.update( state.global_step , comment=f"Epoch {int(state.epoch )}/{state.num_train_epochs}" , force_update=lowercase ) __UpperCamelCase = None
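# Usage sketch (hedged): upstream in `transformers` these classes ship as
# `NotebookProgressBar`, `NotebookTrainingTracker`, and `NotebookProgressCallback`
# in `transformers/utils/notebook.py`, and `Trainer` attaches the callback
# automatically when it detects a Jupyter environment. A minimal manual demo of
# the bar alone, assuming the upstream class name and an IPython session:
#
#   from transformers.utils.notebook import NotebookProgressBar
#   import time
#   bar = NotebookProgressBar(50, prefix="Demo")
#   for step in range(1, 51):
#       time.sleep(0.05)   # simulate work
#       bar.update(step)   # re-renders the inline HTML progress bar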
243
'''simple docstring''' import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def _lowercase ( *__A ): '''simple docstring''' if not isinstance(__A ,__A ): __UpperCamelCase = list(__A ) for i in range(len(__A ) ): __UpperCamelCase = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def _lowercase ( __A ): '''simple docstring''' __UpperCamelCase = [ """CUDA out of memory.""", # CUDA OOM """cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU """DefaultCPUAllocator: can't allocate memory""", # CPU OOM ] if isinstance(__A ,__A ) and len(exception.args ) == 1: return any(err in exception.args[0] for err in _statements ) return False def _lowercase ( __A = None ,__A = 128 ): '''simple docstring''' if function is None: return functools.partial(__A ,starting_batch_size=__A ) __UpperCamelCase = starting_batch_size def decorator(*__A ,**__A ): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() __UpperCamelCase = list(inspect.signature(__A ).parameters.keys() ) # Guard against user error if len(__A ) < (len(__A ) + 1): __UpperCamelCase = """, """.join([f"{arg}={value}" for arg, value in zip(params[1:] ,args[1:] )] ) raise TypeError( f"Batch size was passed into `{function.__name__}` as the first argument when called." f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`" ) while True: if batch_size == 0: raise RuntimeError("""No executable batch size found, reached zero.""" ) try: return function(__A ,*__A ,**__A ) except Exception as e: if should_reduce_batch_size(__A ): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
243
1
import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion" ) _snake_case = None _snake_case = { "7B": 11008, "13B": 13824, "30B": 17920, "65B": 22016, "70B": 28672, } _snake_case = { "7B": 1, "7Bf": 1, "13B": 2, "13Bf": 2, "30B": 4, "65B": 8, "70B": 8, "70Bf": 8, } def lowerCAmelCase_ ( snake_case_,snake_case_=1,snake_case_=256 ): return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def lowerCAmelCase_ ( snake_case_ ): with open(snake_case_,"""r""" ) as f: return json.load(snake_case_ ) def lowerCAmelCase_ ( snake_case_,snake_case_ ): with open(snake_case_,"""w""" ) as f: json.dump(snake_case_,snake_case_ ) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_=True ): os.makedirs(snake_case_,exist_ok=snake_case_ ) _A : Tuple = os.path.join(snake_case_,"""tmp""" ) os.makedirs(snake_case_,exist_ok=snake_case_ ) _A : int = read_json(os.path.join(snake_case_,"""params.json""" ) ) _A : Any = NUM_SHARDS[model_size] _A : Dict = params["""n_layers"""] _A : Optional[Any] = params["""n_heads"""] _A : Union[str, Any] = n_heads // num_shards _A : Dict = params["""dim"""] _A : Optional[Any] = dim // n_heads _A : List[Any] = 1_00_00.0 _A : List[str] = 1.0 / (base ** (torch.arange(0,snake_case_,2 ).float() / dims_per_head)) if "n_kv_heads" in params: _A : str = params["""n_kv_heads"""] # for GQA / MQA _A : List[Any] = n_heads_per_shard // num_key_value_heads _A : List[str] = dim // num_key_value_heads else: # compatibility with other checkpoints _A : Any = n_heads _A : Optional[int] = n_heads_per_shard _A : List[Any] = dim # permute for sliced rotary def permute(snake_case_,snake_case_=n_heads,snake_case_=dim,snake_case_=dim ): return w.view(snake_case_,dima // n_heads // 2,2,snake_case_ ).transpose(1,2 ).reshape(snake_case_,snake_case_ ) print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
_A : str = torch.load(os.path.join(snake_case_,"""consolidated.00.pth""" ),map_location="""cpu""" ) else: # Sharded _A : int = [ torch.load(os.path.join(snake_case_,f'''consolidated.{i:02d}.pth''' ),map_location="""cpu""" ) for i in range(snake_case_ ) ] _A : Optional[int] = 0 _A : Optional[Any] = {"""weight_map""": {}} for layer_i in range(snake_case_ ): _A : int = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded _A : Union[str, Any] = { f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute( loaded[f'''layers.{layer_i}.attention.wq.weight'''] ), f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute( loaded[f'''layers.{layer_i}.attention.wk.weight'''] ), f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''], f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''], f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''], f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''], f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''], f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''], f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. 
_A : Any = { f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][ f'''layers.{layer_i}.attention_norm.weight''' ].clone(), f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][ f'''layers.{layer_i}.ffn_norm.weight''' ].clone(), } _A : Union[str, Any] = permute( torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(snake_case_,snake_case_,snake_case_ ) for i in range(snake_case_ ) ],dim=0,).reshape(snake_case_,snake_case_ ) ) _A : Optional[Any] = permute( torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view( snake_case_,snake_case_,snake_case_ ) for i in range(snake_case_ ) ],dim=0,).reshape(snake_case_,snake_case_ ),snake_case_,snake_case_,snake_case_,) _A : Union[str, Any] = torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view( snake_case_,snake_case_,snake_case_ ) for i in range(snake_case_ ) ],dim=0,).reshape(snake_case_,snake_case_ ) _A : str = torch.cat( [loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(snake_case_ )],dim=1 ) _A : Tuple = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(snake_case_ )],dim=0 ) _A : Dict = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(snake_case_ )],dim=1 ) _A : Any = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(snake_case_ )],dim=0 ) _A : Any = inv_freq for k, v in state_dict.items(): _A : Dict = filename param_count += v.numel() torch.save(snake_case_,os.path.join(snake_case_,snake_case_ ) ) _A : Optional[Any] = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded _A : Optional[Any] = { """model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""], """model.norm.weight""": loaded["""norm.weight"""], """lm_head.weight""": loaded["""output.weight"""], } else: _A : Optional[int] = { """model.norm.weight""": loaded[0]["""norm.weight"""], """model.embed_tokens.weight""": torch.cat( [loaded[i]["""tok_embeddings.weight"""] for i in range(snake_case_ )],dim=1 ), """lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(snake_case_ )],dim=0 ), } for k, v in state_dict.items(): _A : List[str] = filename param_count += v.numel() torch.save(snake_case_,os.path.join(snake_case_,snake_case_ ) ) # Write configs _A : Tuple = {"""total_size""": param_count * 2} write_json(snake_case_,os.path.join(snake_case_,"""pytorch_model.bin.index.json""" ) ) _A : Any = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1 _A : int = params["""multiple_of"""] if """multiple_of""" in params else 256 _A : str = LlamaConfig( hidden_size=snake_case_,intermediate_size=compute_intermediate_size(snake_case_,snake_case_,snake_case_ ),num_attention_heads=params["""n_heads"""],num_hidden_layers=params["""n_layers"""],rms_norm_eps=params["""norm_eps"""],num_key_value_heads=snake_case_,) config.save_pretrained(snake_case_ ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print("""Loading the checkpoint in a Llama model.""" ) _A : Optional[Any] = LlamaForCausalLM.from_pretrained(snake_case_,torch_dtype=torch.floataa,low_cpu_mem_usage=snake_case_ ) # Avoid saving this as part of the config. 
del model.config._name_or_path print("""Saving in the Transformers format.""" ) model.save_pretrained(snake_case_,safe_serialization=snake_case_ ) shutil.rmtree(snake_case_ ) def lowerCAmelCase_ ( snake_case_,snake_case_ ): # Initialize the tokenizer based on the `spm` model _A : Union[str, Any] = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' ) _A : List[Any] = tokenizer_class(snake_case_ ) tokenizer.save_pretrained(snake_case_ ) def lowerCAmelCase_ ( ): _A : Dict = argparse.ArgumentParser() parser.add_argument( """--input_dir""",help="""Location of LLaMA weights, which contains tokenizer.model and model folders""",) parser.add_argument( """--model_size""",choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""],) parser.add_argument( """--output_dir""",help="""Location to write HF model and tokenizer""",) parser.add_argument("""--safe_serialization""",type=snake_case_,help="""Whether or not to save using `safetensors`.""" ) _A : Union[str, Any] = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir,input_base_path=os.path.join(args.input_dir,args.model_size ),model_size=args.model_size,safe_serialization=args.safe_serialization,) _A : int = os.path.join(args.input_dir,"""tokenizer.model""" ) write_tokenizer(args.output_dir,snake_case_ ) if __name__ == "__main__": main()
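# Example invocation (a sketch; the script name and paths are placeholders for
# wherever this conversion script lives in your checkout):
#
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights \
#       --model_size 7B \
#       --output_dir /path/to/output/hf/model
#
# `--input_dir` must contain `tokenizer.model` plus a `7B/` folder holding
# `params.json` and the `consolidated.*.pth` shards, matching the argparse
# setup and `os.path.join(args.input_dir, args.model_size)` above.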
26
"""simple docstring""" import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class lowercase( __a ): '''simple docstring''' lowercase__ = ["image_processor", "tokenizer"] lowercase__ = "AutoImageProcessor" lowercase__ = "AutoTokenizer" def __init__( self: List[str], a_: List[str]=None, a_: Tuple=None, **a_: Tuple ): '''simple docstring''' _snake_case : str = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""", a_, ) _snake_case : str = kwargs.pop("""feature_extractor""" ) _snake_case : Union[str, Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(a_, a_ ) _snake_case : Dict = self.image_processor _snake_case : Any = False def __call__( self: Any, *a_: Any, **a_: Tuple ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*a_, **a_ ) _snake_case : Dict = kwargs.pop("""images""", a_ ) _snake_case : Optional[Any] = kwargs.pop("""text""", a_ ) if len(a_ ) > 0: _snake_case : Optional[int] = args[0] _snake_case : Tuple = args[1:] if images is None and text is None: raise ValueError("""You need to specify either an `images` or `text` input to process.""" ) if images is not None: _snake_case : Tuple = self.image_processor(a_, *a_, **a_ ) if text is not None: _snake_case : Tuple = self.tokenizer(a_, **a_ ) if text is None: return inputs elif images is None: return encodings else: _snake_case : List[str] = encodings["""input_ids"""] return inputs def UpperCamelCase_ ( self: Optional[int], *a_: Tuple, **a_: List[str] ): '''simple docstring''' return self.tokenizer.batch_decode(*a_, **a_ ) def UpperCamelCase_ ( self: int, *a_: List[str], **a_: int ): '''simple docstring''' return self.tokenizer.decode(*a_, **a_ ) @contextmanager def UpperCamelCase_ ( self: Dict ): '''simple docstring''' warnings.warn( """`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your """ """labels by using the argument `text` of the regular `__call__` method (either in the same call as """ """your images inputs, or in a separate call.""" ) _snake_case : Any = True _snake_case : Optional[int] = self.tokenizer yield _snake_case : int = self.image_processor _snake_case : Optional[int] = False def UpperCamelCase_ ( self: Dict, a_: Optional[Any], a_: str=False, a_: Optional[Any]=None ): '''simple docstring''' if added_vocab is None: _snake_case : Dict = self.tokenizer.get_added_vocab() _snake_case : str = {} while tokens: _snake_case : Union[str, Any] = re.search(r"""<s_(.*?)>""", a_, re.IGNORECASE ) if start_token is None: break _snake_case : List[Any] = start_token.group(1 ) _snake_case : str = re.search(rf"</s_{key}>", a_, re.IGNORECASE ) _snake_case : Dict = start_token.group() if end_token is None: _snake_case : List[Any] = tokens.replace(a_, """""" ) else: _snake_case : List[str] = end_token.group() _snake_case : str = re.escape(a_ ) _snake_case : str = re.escape(a_ ) _snake_case : Union[str, Any] = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", a_, re.IGNORECASE ) if content is not None: _snake_case : int = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node _snake_case : List[Any] = self.tokenajson(a_, is_inner_value=a_, added_vocab=a_ ) if value: if len(a_ ) == 1: _snake_case : List[str] = value[0] _snake_case : List[str] = value else: # leaf nodes _snake_case : Tuple = [] for leaf in content.split(r"""<sep/>""" ): _snake_case : Tuple = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": _snake_case : int = leaf[1:-2] # for categorical special tokens output[key].append(a_ ) if len(output[key] ) == 1: _snake_case : int = output[key][0] _snake_case : Any = tokens[tokens.find(a_ ) + len(a_ ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:], is_inner_value=a_, added_vocab=a_ ) if len(a_ ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""", a_, ) return self.image_processor_class @property def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""", a_, ) return self.image_processor
64
0
'''simple docstring''' import math def snake_case_ ( __SCREAMING_SNAKE_CASE : int ): """simple docstring""" lowercase_ : Tuple = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 ) return exponent == int(__SCREAMING_SNAKE_CASE ) def snake_case_ ( __SCREAMING_SNAKE_CASE : float = 1 / 12345 ): """simple docstring""" lowercase_ : Union[str, Any] = 0 lowercase_ : Optional[Any] = 0 lowercase_ : Tuple = 3 while True: lowercase_ : Optional[int] = (integer**2 - 1) / 4 # if candidate is an integer, then there is a partition for k if partition_candidate == int(__SCREAMING_SNAKE_CASE ): lowercase_ : Optional[int] = int(__SCREAMING_SNAKE_CASE ) total_partitions += 1 if check_partition_perfect(__SCREAMING_SNAKE_CASE ): perfect_partitions += 1 if perfect_partitions > 0: if perfect_partitions / total_partitions < max_proportion: return int(__SCREAMING_SNAKE_CASE ) integer += 1 if __name__ == "__main__": print(f"""{solution() = }""")
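# Worked check of the perfection test above (hedged; the mangled `math.loga`
# is evidently `math.log2`): for positive_integer = 2, sqrt(4 * 2 + 1) = 3,
# the log argument is 3/2 + 1/2 = 2, and log2(2) == 1 is an integer, so 2 is
# classified as a perfect partition.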
264
'''simple docstring''' import qiskit def snake_case_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ): """simple docstring""" lowercase_ : List[Any] = qiskit.Aer.get_backend('''aer_simulator''' ) # Create a Quantum Circuit acting on the q register lowercase_ : Dict = qiskit.QuantumCircuit(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Map the quantum measurement to the classical bits circuit.measure([0] , [0] ) # Execute the circuit on the simulator lowercase_ : Union[str, Any] = qiskit.execute(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , shots=1000 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": print(f"""Total count for various states are: {single_qubit_measure(1, 1)}""")
264
1
"""simple docstring""" def a_ ( lowerCamelCase = 6_0_0_8_5_1_4_7_5_1_4_3 ): try: UpperCAmelCase__ = int(a__ ) except (TypeError, ValueError): raise TypeError('Parameter n must be int or castable to int.' ) if n <= 0: raise ValueError('Parameter n must be greater than or equal to one.' ) UpperCAmelCase__ = 1 UpperCAmelCase__ = 2 while i * i <= n: while n % i == 0: UpperCAmelCase__ = i n //= i i += 1 if n > 1: UpperCAmelCase__ = n return int(a__ ) if __name__ == "__main__": print(F"""{solution() = }""")
98
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __snake_case : Dict = logging.get_logger(__name__) __snake_case : Any = {"""vocab_file""": """sentencepiece.bpe.model"""} __snake_case : Union[str, Any] = { """vocab_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model""" ), }, } __snake_case : Tuple = { """moussaKam/mbarthez""": 10_24, """moussaKam/barthez""": 10_24, """moussaKam/barthez-orangesum-title""": 10_24, } __snake_case : int = """▁""" class A__(a_ ): """simple docstring""" _A : str = VOCAB_FILES_NAMES _A : int = PRETRAINED_VOCAB_FILES_MAP _A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A : Tuple = ['''input_ids''', '''attention_mask'''] def __init__( self , _lowercase , _lowercase="<s>" , _lowercase="</s>" , _lowercase="</s>" , _lowercase="<s>" , _lowercase="<unk>" , _lowercase="<pad>" , _lowercase="<mask>" , _lowercase = None , **_lowercase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it a_ : Dict = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token a_ : str = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , ) a_ : List[Any] = vocab_file a_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_lowercase ) ) a_ : Optional[Any] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} a_ : str = len(self.sp_model ) - 1 a_ : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def UpperCamelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] a_ : Optional[Any] = [self.cls_token_id] a_ : Union[str, Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase__ ( self , _lowercase , _lowercase = None , _lowercase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase ) if token_ids_a is None: return [1] + ([0] * len(_lowercase )) + [1] return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1] def UpperCamelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]: a_ : Optional[Any] = [self.sep_token_id] a_ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCamelCase__ ( self ) -> List[Any]: return len(self.sp_model ) def UpperCamelCase__ ( self ) -> List[str]: a_ : List[Any] = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase__ ( self , _lowercase ) -> List[str]: return self.sp_model.encode(_lowercase , 
out_type=_lowercase ) def UpperCamelCase__ ( self , _lowercase ) -> Any: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] a_ : Optional[int] = self.sp_model.PieceToId(_lowercase ) return spm_id if spm_id else self.unk_token_id def UpperCamelCase__ ( self , _lowercase ) -> Union[str, Any]: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(_lowercase ) def UpperCamelCase__ ( self , _lowercase ) -> Any: a_ : Dict = [] a_ : Union[str, Any] = """""" a_ : Dict = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_lowercase ) + token a_ : Dict = True a_ : int = [] else: current_sub_tokens.append(_lowercase ) a_ : Union[str, Any] = False out_string += self.sp_model.decode(_lowercase ) return out_string.strip() def __getstate__( self ) -> Optional[int]: a_ : Any = self.__dict__.copy() a_ : int = None return state def __setstate__( self , _lowercase ) -> Union[str, Any]: a_ : List[Any] = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): a_ : str = {} a_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]: if not os.path.isdir(_lowercase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return a_ : Dict = os.path.join( _lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowercase ) elif not os.path.isfile(self.vocab_file ): with open(_lowercase , """wb""" ) as fi: a_ : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(_lowercase ) return (out_vocab_file,)
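# Usage sketch (hedged; upstream this class is `transformers.BarthezTokenizer`,
# and the checkpoint name below assumes Hub access):
#
#   from transformers import BarthezTokenizer
#   tok = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#   ids = tok("Le chat dort.")["input_ids"]   # wrapped as <s> ... </s>
#   print(tok.decode(ids))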
248
0
'''simple docstring''' from __future__ import annotations import math def UpperCamelCase ( a , a ) -> list: '''simple docstring''' if len(a ) != 2 or len(a[0] ) != 2 or len(a ) != 2 or len(b[0] ) != 2: raise Exception('''Matrices are not 2x2''' ) __magic_name__ = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def UpperCamelCase ( a , a ) -> str: '''simple docstring''' return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(a ) ) ] def UpperCamelCase ( a , a ) -> Union[str, Any]: '''simple docstring''' return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(a ) ) ] def UpperCamelCase ( a ) -> tuple[list, list, list, list]: '''simple docstring''' if len(a ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception('''Odd matrices are not supported!''' ) __magic_name__ = len(a ) __magic_name__ = matrix_length // 2 __magic_name__ = [[a[i][j] for j in range(a , a )] for i in range(a )] __magic_name__ = [ [a[i][j] for j in range(a , a )] for i in range(a , a ) ] __magic_name__ = [[a[i][j] for j in range(a )] for i in range(a )] __magic_name__ = [[a[i][j] for j in range(a )] for i in range(a , a )] return top_left, top_right, bot_left, bot_right def UpperCamelCase ( a ) -> tuple[int, int]: '''simple docstring''' return len(a ), len(matrix[0] ) def UpperCamelCase ( a ) -> None: '''simple docstring''' print('''\n'''.join(str(a ) for line in matrix ) ) def UpperCamelCase ( a , a ) -> list: '''simple docstring''' if matrix_dimensions(a ) == (2, 2): return default_matrix_multiplication(a , a ) __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = split_matrix(a ) __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = split_matrix(a ) __magic_name__ = actual_strassen(a , matrix_subtraction(a , a ) ) __magic_name__ = actual_strassen(matrix_addition(a , a ) , a ) __magic_name__ = actual_strassen(matrix_addition(a , a ) , a ) __magic_name__ = actual_strassen(a , matrix_subtraction(a , a ) ) __magic_name__ = actual_strassen(matrix_addition(a , a ) , matrix_addition(a , a ) ) __magic_name__ = actual_strassen(matrix_subtraction(a , a ) , matrix_addition(a , a ) ) __magic_name__ = actual_strassen(matrix_subtraction(a , a ) , matrix_addition(a , a ) ) __magic_name__ = matrix_addition(matrix_subtraction(matrix_addition(a , a ) , a ) , a ) __magic_name__ = matrix_addition(a , a ) __magic_name__ = matrix_addition(a , a ) __magic_name__ = matrix_subtraction(matrix_subtraction(matrix_addition(a , a ) , a ) , a ) # construct the new matrix from our 4 quadrants __magic_name__ = [] for i in range(len(a ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(a ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def UpperCamelCase ( a , a ) -> list: '''simple docstring''' if matrix_dimensions(a )[1] != matrix_dimensions(a )[0]: __magic_name__ = ( '''Unable to multiply these matrices, please check the dimensions.\n''' F'''Matrix A: {matrixa}\n''' F'''Matrix B: {matrixa}''' ) raise Exception(a ) __magic_name__ = matrix_dimensions(a ) __magic_name__ = matrix_dimensions(a ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] __magic_name__ = max(*a , *a ) __magic_name__ = int(math.pow(2 , math.ceil(math.loga(a ) ) ) ) __magic_name__ = matrixa __magic_name__ = matrixa # Adding zeros to the 
matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , a ): if i < dimensiona[0]: for _ in range(dimensiona[1] , a ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , a ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) __magic_name__ = actual_strassen(a , a ) # Removing the additional zeros for i in range(0 , a ): if i < dimensiona[0]: for _ in range(dimensiona[1] , a ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": _lowerCAmelCase : str = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] _lowerCAmelCase : List[str] = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]] print(strassen(matrixa, matrixa))
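# Note: strassen() zero-pads both operands up to the next power-of-two square
# before recursing and strips the padding afterwards; each recursion level does
# seven sub-multiplications (t1..t7) instead of eight, which is what brings the
# cost from the naive O(n^3) down to roughly O(n^2.807).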
364
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase = { "configuration_clap": [ "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST", "ClapAudioConfig", "ClapConfig", "ClapTextConfig", ], "processing_clap": ["ClapProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST", "ClapModel", "ClapPreTrainedModel", "ClapTextModel", "ClapTextModelWithProjection", "ClapAudioModel", "ClapAudioModelWithProjection", ] _lowerCAmelCase = ["ClapFeatureExtractor"] if TYPE_CHECKING: from .configuration_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioConfig, ClapConfig, ClapTextConfig, ) from .processing_clap import ClapProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clap import ClapFeatureExtractor from .modeling_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioModel, ClapAudioModelWithProjection, ClapModel, ClapPreTrainedModel, ClapTextModel, ClapTextModelWithProjection, ) else: import sys _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
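# Note: the `_LazyModule` indirection defers the heavy torch-dependent imports
# until one of the listed attributes (e.g. `ClapModel`) is first accessed; the
# `TYPE_CHECKING` branch exists only so static type checkers still resolve the
# real symbols.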
98
0
from math import pi, sqrt, tan def lowerCamelCase_ ( lowerCamelCase__ ): if side_length < 0: raise ValueError("surface_area_cube() only accepts non-negative values" ) return 6 * side_length**2 def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): if length < 0 or breadth < 0 or height < 0: raise ValueError("surface_area_cuboid() only accepts non-negative values" ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def lowerCamelCase_ ( lowerCamelCase__ ): if radius < 0: raise ValueError("surface_area_sphere() only accepts non-negative values" ) return 4 * pi * radius**2 def lowerCamelCase_ ( lowerCamelCase__ ): if radius < 0: raise ValueError("surface_area_hemisphere() only accepts non-negative values" ) return 3 * pi * radius**2 def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ): if radius < 0 or height < 0: raise ValueError("surface_area_cone() only accepts non-negative values" ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( "surface_area_conical_frustum() only accepts non-negative values" ) lowerCamelCase_ = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ): if radius < 0 or height < 0: raise ValueError("surface_area_cylinder() only accepts non-negative values" ) return 2 * pi * radius * (height + radius) def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ): if torus_radius < 0 or tube_radius < 0: raise ValueError("surface_area_torus() only accepts non-negative values" ) if torus_radius < tube_radius: raise ValueError( "surface_area_torus() does not support spindle or self intersecting tori" ) return 4 * pow(lowerCamelCase__ , 2 ) * torus_radius * tube_radius def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ): if length < 0 or width < 0: raise ValueError("area_rectangle() only accepts non-negative values" ) return length * width def lowerCamelCase_ ( lowerCamelCase__ ): if side_length < 0: raise ValueError("area_square() only accepts non-negative values" ) return side_length**2 def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ): if base < 0 or height < 0: raise ValueError("area_triangle() only accepts non-negative values" ) return (base * height) / 2 def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError("area_triangle_three_sides() only accepts non-negative values" ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError("Given three sides do not form a triangle" ) lowerCamelCase_ = (sidea + sidea + sidea) / 2 lowerCamelCase_ = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ): if base < 0 or height < 0: raise ValueError("area_parallelogram() only accepts non-negative values" ) return base * height def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): if basea < 0 or basea < 0 or height < 0: raise ValueError("area_trapezium() only accepts non-negative values" ) return 1 / 2 * (basea + basea) * height def lowerCamelCase_ ( lowerCamelCase__ ): if radius < 0: raise ValueError("area_circle() only accepts non-negative values" ) return pi * 
radius**2 def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ): if radius_x < 0 or radius_y < 0: raise ValueError("area_ellipse() only accepts non-negative values" ) return pi * radius_x * radius_y def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ): if diagonal_a < 0 or diagonal_a < 0: raise ValueError("area_rhombus() only accepts non-negative values" ) return 1 / 2 * diagonal_a * diagonal_a def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ): if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or sides < 3: raise ValueError( "area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides" ) elif length < 0: raise ValueError( "area_reg_polygon() only accepts non-negative values as \ length of a side" ) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print('''[DEMO] Areas of various geometric shapes: \n''') print(F"""Rectangle: {area_rectangle(1_0, 2_0) = }""") print(F"""Square: {area_square(1_0) = }""") print(F"""Triangle: {area_triangle(1_0, 1_0) = }""") print(F"""Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }""") print(F"""Parallelogram: {area_parallelogram(1_0, 2_0) = }""") print(F"""Rhombus: {area_rhombus(1_0, 2_0) = }""") print(F"""Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }""") print(F"""Circle: {area_circle(2_0) = }""") print(F"""Ellipse: {area_ellipse(1_0, 2_0) = }""") print('''\nSurface Areas of various geometric shapes: \n''') print(F"""Cube: {surface_area_cube(2_0) = }""") print(F"""Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }""") print(F"""Sphere: {surface_area_sphere(2_0) = }""") print(F"""Hemisphere: {surface_area_hemisphere(2_0) = }""") print(F"""Cone: {surface_area_cone(1_0, 2_0) = }""") print(F"""Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }""") print(F"""Cylinder: {surface_area_cylinder(1_0, 2_0) = }""") print(F"""Torus: {surface_area_torus(2_0, 1_0) = }""") print(F"""Equilateral Triangle: {area_reg_polygon(3, 1_0) = }""") print(F"""Square: {area_reg_polygon(4, 1_0) = }""") print(F"""Regular Pentagon: {area_reg_polygon(5, 1_0) = }""")
19
"""simple docstring""" import os def lowerCamelCase__ ( ) -> List[Any]: with open(os.path.dirname(_lowerCamelCase ) + '/grid.txt' ) as f: lowerCamelCase_ = [] # noqa: E741 for _ in range(20 ): l.append([int(_lowerCamelCase ) for x in f.readline().split()] ) lowerCamelCase_ = 0 # right for i in range(20 ): for j in range(17 ): lowerCamelCase_ = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] if temp > maximum: lowerCamelCase_ = temp # down for i in range(17 ): for j in range(20 ): lowerCamelCase_ = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] if temp > maximum: lowerCamelCase_ = temp # diagonal 1 for i in range(17 ): for j in range(17 ): lowerCamelCase_ = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] if temp > maximum: lowerCamelCase_ = temp # diagonal 2 for i in range(17 ): for j in range(3 , 20 ): lowerCamelCase_ = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] if temp > maximum: lowerCamelCase_ = temp return maximum if __name__ == "__main__": print(solution())
183
0
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCamelCase_ : Optional[int] = logging.get_logger(__name__) lowerCamelCase_ : Optional[int] = { """post_extract_proj""": """feature_projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.upsample.0""": """encoder.upsample.projection""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def _A ( lowercase , lowercase , lowercase , lowercase , lowercase ): """simple docstring""" for attribute in key.split('''.''' ): a =getattr(a__ , a__ ) if weight_type is not None: a =getattr(a__ , a__ ).shape else: a =hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": a =value elif weight_type == "weight_g": a =value elif weight_type == "weight_v": a =value elif weight_type == "bias": a =value else: a =value logger.info(f'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _A ( lowercase , lowercase , lowercase ): """simple docstring""" a =[] a =fairseq_model.state_dict() a =hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): a =False if "conv_layers" in name: load_conv_layer( a__ , a__ , a__ , a__ , hf_model.config.feat_extract_norm == '''group''' , ) a =True else: for key, mapped_key in MAPPING.items(): a ='''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: a =True if "*" in mapped_key: a =name.split(a__ )[0].split('''.''' )[-2] a =mapped_key.replace('''*''' , a__ ) if "weight_g" in name: a ='''weight_g''' elif "weight_v" in name: a ='''weight_v''' elif "weight" in name: a ='''weight''' elif "bias" in name: a ='''bias''' else: a =None set_recursively(a__ , a__ , a__ , a__ , a__ ) continue if not is_used: unused_weights.append(a__ ) logger.warning(f'''Unused weights: {unused_weights}''' ) def _A ( lowercase , lowercase , lowercase , lowercase , lowercase ): """simple docstring""" a =full_name.split('''conv_layers.''' )[-1] a =name.split('''.''' ) a =int(items[0] ) a =int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) a =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) a =value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) a =value logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) a =value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(a__ ) def _A ( lowercase , lowercase ): """simple docstring""" a =SEWConfig() if is_finetuned: a =model.wav_encoder.wav_model.cfg else: a =model.cfg a =fs_config.conv_bias a =eval(fs_config.conv_feature_layers ) a =[x[0] for x in conv_layers] a =[x[1] for x in conv_layers] a =[x[2] for x in conv_layers] a ='''gelu''' a ='''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group''' a =0.0 a =fs_config.activation_fn.name a =fs_config.encoder_embed_dim a =0.02 a =fs_config.encoder_ffn_embed_dim a =1E-5 a =fs_config.encoder_layerdrop a =fs_config.encoder_attention_heads a =fs_config.conv_pos_groups a =fs_config.conv_pos a =len(a__ ) a =fs_config.encoder_layers a =fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: a =model.cfg a =fs_config.final_dropout a =fs_config.layerdrop a =fs_config.activation_dropout a =fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 a =fs_config.attention_dropout a =fs_config.dropout_input a =fs_config.dropout a =fs_config.mask_channel_length a =fs_config.mask_channel_prob a =fs_config.mask_length a =fs_config.mask_prob a ='''Wav2Vec2FeatureExtractor''' a ='''Wav2Vec2CTCTokenizer''' return config @torch.no_grad() def _A ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ): """simple docstring""" if is_finetuned: a , a , a =fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: a , a , a =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: a =SEWConfig.from_pretrained(a__ ) else: a =convert_config(model[0] , a__ ) a =model[0].eval() a =True if config.feat_extract_norm == '''layer''' else False a =WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=a__ , return_attention_mask=a__ , ) if is_finetuned: if dict_path: a =Dictionary.load(a__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq a =target_dict.pad_index a =target_dict.bos_index a =target_dict.pad_index a =target_dict.bos_index a =target_dict.eos_index a =len(target_dict.symbols ) a =os.path.join(a__ , '''vocab.json''' ) if not os.path.isdir(a__ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(a__ ) ) return os.makedirs(a__ , exist_ok=a__ ) with open(a__ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , a__ ) a =WavaVecaCTCTokenizer( a__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=a__ , ) a =WavaVecaProcessor(feature_extractor=a__ , tokenizer=a__ ) processor.save_pretrained(a__ ) a =SEWForCTC(a__ ) else: a =SEWModel(a__ ) feature_extractor.save_pretrained(a__ ) recursively_load_weights(a__ , a__ , a__ ) hf_model.save_pretrained(a__ ) if __name__ == "__main__": lowerCamelCase_ 
: Optional[int] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) lowerCamelCase_ : Union[str, Any] = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
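# Example invocation (a sketch; the script name, checkpoint, and paths are
# placeholders):
#
#   python convert_sew_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path sew_checkpoint.pt \
#       --pytorch_dump_folder_path ./sew-hf \
#       --dict_path dict.ltr.txt \
#       --is_finetuned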
369
"""simple docstring""" from ...processing_utils import ProcessorMixin class __A ( _SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase = "WhisperFeatureExtractor" __lowerCAmelCase = "WhisperTokenizer" def __init__( self , __A , __A ) -> Dict: super().__init__(__A , __A ) a =self.feature_extractor a =False def SCREAMING_SNAKE_CASE ( self , __A=None , __A=None , __A=True ) -> int: return self.tokenizer.get_decoder_prompt_ids(task=__A , language=__A , no_timestamps=__A ) def __call__( self , *__A , **__A ) -> Tuple: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*__A , **__A ) a =kwargs.pop('''audio''' , __A ) a =kwargs.pop('''sampling_rate''' , __A ) a =kwargs.pop('''text''' , __A ) if len(__A ) > 0: a =args[0] a =args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: a =self.feature_extractor(__A , *__A , sampling_rate=__A , **__A ) if text is not None: a =self.tokenizer(__A , **__A ) if text is None: return inputs elif audio is None: return encodings else: a =encodings['''input_ids'''] return inputs def SCREAMING_SNAKE_CASE ( self , *__A , **__A ) -> Optional[Any]: return self.tokenizer.batch_decode(*__A , **__A ) def SCREAMING_SNAKE_CASE ( self , *__A , **__A ) -> Union[str, Any]: return self.tokenizer.decode(*__A , **__A ) def SCREAMING_SNAKE_CASE ( self , __A , __A="np" ) -> Optional[Any]: return self.tokenizer.get_prompt_ids(__A , return_tensors=__A )
215
0
# Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position lowercase__ :int = "2.13.1" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("3.7"): raise ImportWarning( "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition." ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n" "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`." ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip lowercase__ :Tuple = concatenate_datasets lowercase__ :List[str] = DownloadConfig lowercase__ :Optional[int] = DownloadManager lowercase__ :Optional[int] = DownloadMode lowercase__ :Any = DownloadConfig lowercase__ :str = DownloadMode lowercase__ :List[str] = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
101
import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): lowercase_ : List[Any] =IFInpaintingPipeline lowercase_ : Optional[int] =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} lowercase_ : Any =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowercase_ : str =PipelineTesterMixin.required_optional_params - {'''latents'''} def A__ ( self): return self._get_dummy_components() def A__ ( self ,A__ ,A__=0): if str(A__).startswith('''mps'''): lowercase = torch.manual_seed(A__) else: lowercase = torch.Generator(device=A__).manual_seed(A__) lowercase = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(A__)).to(A__) lowercase = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(A__)).to(A__) lowercase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,) def A__ ( self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3) def A__ ( self): self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' ,reason='''float16 requires CUDA''') def A__ ( self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1) def A__ ( self): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2) def A__ ( self): self._test_save_load_local() def A__ ( self): self._test_inference_batch_single_identical( expected_max_diff=1E-2 ,)
101
1
'''simple docstring''' from __future__ import annotations def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" if not nums: return 0 lowerCAmelCase__ : Tuple = nums[0] lowerCAmelCase__ : List[Any] = 0 for num in nums[1:]: lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = ( max_excluding + num, max(UpperCamelCase , UpperCamelCase ), ) return max(UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
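# The intended recurrence (the variable names above were mangled to a single
# identifier): keep two accumulators, max_including and max_excluding, and at
# each element set
#   new_including = max_excluding + num
#   new_excluding = max(max_including, max_excluding)
# returning max(max_including, max_excluding) -- the classic "house robber"
# maximum sum over non-adjacent elements.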
184
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( UpperCamelCase = 1000 ): """simple docstring""" lowerCAmelCase__ : Union[str, Any] = -1 lowerCAmelCase__ : Optional[Any] = 0 for a in range(1 , n // 3 ): # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c lowerCAmelCase__ : Optional[Any] = (n * n - 2 * a * n) // (2 * n - 2 * a) lowerCAmelCase__ : Tuple = n - a - b if c * c == (a * a + b * b): lowerCAmelCase__ : int = a * b * c if candidate >= product: lowerCAmelCase__ : Any = candidate return product if __name__ == "__main__": print(F"""{solution() = }""")
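# Worked result: for n = 1000 the special Pythagorean triplet is
# (a, b, c) = (200, 375, 425), since 200 + 375 + 425 = 1000 and
# 200**2 + 375**2 = 40000 + 140625 = 180625 = 425**2, so the function
# returns 200 * 375 * 425 = 31875000 (Project Euler problem 9).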
184
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = { "configuration_xlm_roberta_xl": [ "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMRobertaXLConfig", "XLMRobertaXLOnnxConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMRobertaXLForCausalLM", "XLMRobertaXLForMaskedLM", "XLMRobertaXLForMultipleChoice", "XLMRobertaXLForQuestionAnswering", "XLMRobertaXLForSequenceClassification", "XLMRobertaXLForTokenClassification", "XLMRobertaXLModel", "XLMRobertaXLPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure)
10
import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase__ : Optional[Any] = logging.get_logger(__name__) lowercase__ : List[str] = "▁" lowercase__ : Union[str, Any] = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"} lowercase__ : List[Any] = { "sentencepiece_model_file": "sentencepiece.bpe.model", "vocab_file": "vocab.txt", } lowercase__ : Tuple = { "vocab_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", }, "sentencepiece_model_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", }, } lowercase__ : Optional[int] = { "ernie-m-base": 514, "ernie-m-large": 514, } lowercase__ : Dict = { "ernie-m-base": {"do_lower_case": False}, "ernie-m-large": {"do_lower_case": False}, } class UpperCAmelCase ( UpperCAmelCase__ ): '''simple docstring''' lowerCAmelCase_ = ["input_ids"] lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_INIT_CONFIGURATION lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = RESOURCE_FILES_NAMES def __init__( self : Dict , __lowercase : List[Any] , __lowercase : Tuple=None , __lowercase : List[str]=False , __lowercase : List[str]="utf8" , __lowercase : Union[str, Any]="[UNK]" , __lowercase : List[str]="[SEP]" , __lowercase : Optional[Any]="[PAD]" , __lowercase : Any="[CLS]" , __lowercase : Any="[MASK]" , __lowercase : Optional[Dict[str, Any]] = None , **__lowercase : Tuple , ): """simple docstring""" snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , vocab_file=__lowercase , encoding=__lowercase , sp_model_kwargs=self.sp_model_kwargs , **__lowercase , ) snake_case_ = do_lower_case snake_case_ = sentencepiece_model_ckpt snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowercase ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: snake_case_ = self.load_vocab(filepath=__lowercase ) else: snake_case_ = {self.sp_model.id_to_piece(__lowercase ): id for id in range(self.sp_model.get_piece_size() )} snake_case_ = {v: k for k, v in self.vocab.items()} def snake_case__ ( self : Dict , __lowercase : Optional[int] ): """simple docstring""" if text is None: return None snake_case_ = self.tokenize(__lowercase ) snake_case_ , snake_case_ = "", [] for i, ch in enumerate(__lowercase ): if ch in self.SP_CHAR_MAPPING: snake_case_ = self.SP_CHAR_MAPPING.get(__lowercase ) else: snake_case_ = unicodedata.normalize("NFKC" , __lowercase ) if self.is_whitespace(__lowercase ): continue normalized_text += ch char_mapping.extend([i] * len(__lowercase ) ) snake_case_ , snake_case_ , snake_case_ = normalized_text, [], 0 if self.do_lower_case: snake_case_ = text.lower() for token in split_tokens: if token[:1] == "▁": snake_case_ = token[1:] snake_case_ = text[offset:].index(__lowercase ) + offset snake_case_ = start + len(__lowercase ) 
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) snake_case_ = end return token_mapping @property def snake_case__ ( self : Optional[int] ): """simple docstring""" return len(self.vocab ) def snake_case__ ( self : Any ): """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self : List[str] ): """simple docstring""" snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : str , __lowercase : str ): """simple docstring""" snake_case_ = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def snake_case__ ( self : int , __lowercase : Optional[Any] ): """simple docstring""" return "".join((self.SP_CHAR_MAPPING.get(__lowercase , __lowercase ) for c in text) ) def snake_case__ ( self : List[str] , __lowercase : int , __lowercase : Any=False , __lowercase : str=64 , __lowercase : Optional[Any]=0.1 ): """simple docstring""" if self.sp_model_kwargs.get("enable_sampling" ) is True: snake_case_ = True if self.sp_model_kwargs.get("alpha" ) is not None: snake_case_ = self.sp_model_kwargs.get("alpha" ) if self.sp_model_kwargs.get("nbest_size" ) is not None: snake_case_ = self.sp_model_kwargs.get("nbest_size" ) if not enable_sampling: snake_case_ = self.sp_model.EncodeAsPieces(__lowercase ) else: snake_case_ = self.sp_model.SampleEncodeAsPieces(__lowercase , __lowercase , __lowercase ) snake_case_ = [] for pi, piece in enumerate(__lowercase ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(__lowercase ) and pi != 0: new_pieces.append(__lowercase ) continue else: continue snake_case_ = 0 for i, chunk in enumerate(__lowercase ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(__lowercase ) or self.is_punct(__lowercase ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(__lowercase ) snake_case_ = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) snake_case_ = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) snake_case_ = i if len(__lowercase ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def snake_case__ ( self : List[Any] , __lowercase : Dict ): """simple docstring""" snake_case_ = "".join(__lowercase ).replace(__lowercase , " " ).strip() return out_string def snake_case__ ( self : int , __lowercase : int ): """simple docstring""" snake_case_ = self.convert_ids_to_tokens(__lowercase ) snake_case_ = "".join(__lowercase ).replace(__lowercase , " " ).strip() return out_string def snake_case__ ( self : Dict , __lowercase : Any ): """simple docstring""" return self.vocab.get(__lowercase , self.vocab.get(self.unk_token ) ) def snake_case__ ( self : str , __lowercase : List[Any] ): """simple docstring""" return self.reverse_vocab.get(__lowercase , self.unk_token ) def snake_case__ ( self : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : int=None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def snake_case__ ( self : str , __lowercase : List[str] , 
__lowercase : Any=None ): """simple docstring""" if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def snake_case__ ( self : Dict , __lowercase : List[Any] , __lowercase : List[Any]=None , __lowercase : Dict=False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__lowercase )) + [1, 1] + ([0] * len(__lowercase )) + [1] return [1] + ([0] * len(__lowercase )) + [1] def snake_case__ ( self : Optional[int] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: # [CLS] X [SEP] return (len(__lowercase ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(__lowercase ) + 1) + [1] * (len(__lowercase ) + 3) def snake_case__ ( self : Any , __lowercase : Union[str, Any] ): """simple docstring""" if "\u4e00" <= char <= "\u9fff": return True return False def snake_case__ ( self : List[str] , __lowercase : Any ): """simple docstring""" if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def snake_case__ ( self : int , __lowercase : Dict ): """simple docstring""" if char in ",;:.?!~,;:。?!《》【】": return True return False def snake_case__ ( self : Union[str, Any] , __lowercase : Union[str, Any] ): """simple docstring""" if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(__lowercase ) == 1: snake_case_ = unicodedata.category(__lowercase ) if cat == "Zs": return True return False def snake_case__ ( self : Dict , __lowercase : Optional[Any] ): """simple docstring""" snake_case_ = {} with io.open(__lowercase , "r" , encoding="utf-8" ) as f: for index, line in enumerate(__lowercase ): snake_case_ = line.rstrip("\n" ) snake_case_ = int(__lowercase ) return token_to_idx def snake_case__ ( self : Dict , __lowercase : str , __lowercase : Optional[str] = None ): """simple docstring""" snake_case_ = 0 if os.path.isdir(__lowercase ): snake_case_ = os.path.join( __lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: snake_case_ = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(__lowercase , "w" , encoding="utf-8" ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda __lowercase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" ) snake_case_ = token_index writer.write(token + "\n" ) index += 1 snake_case_ = os.path.join(__lowercase , "sentencepiece.bpe.model" ) with open(__lowercase , "wb" ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(__lowercase ) return (vocab_file,)
187
0
from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    """Count the square laminae that can be formed with up to ``limit`` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer


if __name__ == "__main__":
    print(f"""{solution() = }""")
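# Added sanity check (my own addition): the Project Euler 173 statement says that with
# up to one hundred tiles, forty-one different square laminae can be formed.
assert solution(100) == 41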
363
from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    """Count the square laminae that can be formed with up to ``limit`` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer


if __name__ == "__main__":
    print(f"""{solution() = }""")
109
0
"""simple docstring""" import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : str ): __lowercase = 3 __lowercase = 2_5_0 __lowercase = ids_tensor((batch_size, length), UpperCAmelCase__ ) __lowercase = torch.ones((batch_size, length), device=UpperCAmelCase__, dtype=torch.float ) / length return input_ids, scores def _lowercase ( self : List[Any] ): __lowercase ,__lowercase = self._get_tensors(5 ) __lowercase = StoppingCriteriaList( [ MaxLengthCriteria(max_length=1_0 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(UpperCAmelCase__, UpperCAmelCase__ ) ) __lowercase ,__lowercase = self._get_tensors(9 ) self.assertFalse(criteria(UpperCAmelCase__, UpperCAmelCase__ ) ) __lowercase ,__lowercase = self._get_tensors(1_0 ) self.assertTrue(criteria(UpperCAmelCase__, UpperCAmelCase__ ) ) def _lowercase ( self : Any ): __lowercase = MaxLengthCriteria(max_length=1_0 ) __lowercase ,__lowercase = self._get_tensors(5 ) self.assertFalse(criteria(UpperCAmelCase__, UpperCAmelCase__ ) ) __lowercase ,__lowercase = self._get_tensors(9 ) self.assertFalse(criteria(UpperCAmelCase__, UpperCAmelCase__ ) ) __lowercase ,__lowercase = self._get_tensors(1_0 ) self.assertTrue(criteria(UpperCAmelCase__, UpperCAmelCase__ ) ) def _lowercase ( self : List[Any] ): __lowercase = MaxNewTokensCriteria(start_length=5, max_new_tokens=5 ) __lowercase ,__lowercase = self._get_tensors(5 ) self.assertFalse(criteria(UpperCAmelCase__, UpperCAmelCase__ ) ) __lowercase ,__lowercase = self._get_tensors(9 ) self.assertFalse(criteria(UpperCAmelCase__, UpperCAmelCase__ ) ) __lowercase ,__lowercase = self._get_tensors(1_0 ) self.assertTrue(criteria(UpperCAmelCase__, UpperCAmelCase__ ) ) __lowercase = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length, 1_0 ) def _lowercase ( self : Optional[int] ): __lowercase ,__lowercase = self._get_tensors(5 ) __lowercase = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(UpperCAmelCase__, UpperCAmelCase__ ) ) __lowercase = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(UpperCAmelCase__, UpperCAmelCase__ ) ) def _lowercase ( self : int ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ), 1_0 ) with self.assertWarns(UpperCAmelCase__ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ), 1_1 ) __lowercase = validate_stopping_criteria(StoppingCriteriaList(), 1_1 ) self.assertEqual(len(UpperCAmelCase__ ), 1 )
17
"""simple docstring""" import inspect import unittest import numpy as np from transformers import BeitConfig from transformers.testing_utils import require_flax, require_vision, slow from transformers.utils import cached_property, is_flax_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor if is_flax_available(): import jax from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : int=1_0_0, UpperCAmelCase__ : Any=1_3, UpperCAmelCase__ : List[Any]=3_0, UpperCAmelCase__ : Dict=2, UpperCAmelCase__ : Any=3, UpperCAmelCase__ : Optional[Any]=True, UpperCAmelCase__ : List[str]=True, UpperCAmelCase__ : Optional[Any]=3_2, UpperCAmelCase__ : Any=5, UpperCAmelCase__ : Any=4, UpperCAmelCase__ : Any=3_7, UpperCAmelCase__ : Optional[int]="gelu", UpperCAmelCase__ : Dict=0.1, UpperCAmelCase__ : Optional[int]=0.1, UpperCAmelCase__ : Dict=1_0, UpperCAmelCase__ : Tuple=0.02, UpperCAmelCase__ : List[Any]=3, ): __lowercase = parent __lowercase = vocab_size __lowercase = batch_size __lowercase = image_size __lowercase = patch_size __lowercase = num_channels __lowercase = is_training __lowercase = use_labels __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = type_sequence_label_size __lowercase = initializer_range # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __lowercase = (image_size // patch_size) ** 2 __lowercase = num_patches + 1 def _lowercase ( self : int ): __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size], self.type_sequence_label_size ) __lowercase = BeitConfig( vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=UpperCAmelCase__, initializer_range=self.initializer_range, ) return config, pixel_values, labels def _lowercase ( self : Any, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : List[str] ): __lowercase = FlaxBeitModel(config=UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : List[Any] ): __lowercase = FlaxBeitForMaskedImageModeling(config=UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _lowercase ( self : Dict, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : Union[str, Any] ): 
__lowercase = self.type_sequence_label_size __lowercase = FlaxBeitForImageClassification(config=UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowercase = 1 __lowercase = FlaxBeitForImageClassification(UpperCAmelCase__ ) __lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowercase = model(UpperCAmelCase__ ) def _lowercase ( self : List[str] ): __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) ,( __lowercase ) ,( __lowercase ) , ) = config_and_inputs __lowercase = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class _lowerCAmelCase ( lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : str = ( (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else () ) def _lowercase ( self : List[Any] ): __lowercase = FlaxBeitModelTester(self ) __lowercase = ConfigTester(self, config_class=UpperCAmelCase__, has_text_modality=UpperCAmelCase__, hidden_size=3_7 ) def _lowercase ( self : Union[str, Any] ): self.config_tester.run_common_tests() def _lowercase ( self : Optional[int] ): __lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = model_class(UpperCAmelCase__ ) __lowercase = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase = [*signature.parameters.keys()] __lowercase = ["pixel_values"] self.assertListEqual(arg_names[:1], UpperCAmelCase__ ) def _lowercase ( self : Tuple ): __lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowercase = self._prepare_for_class(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = model_class(UpperCAmelCase__ ) @jax.jit def model_jitted(UpperCAmelCase__ : str, **UpperCAmelCase__ : Dict ): return model(pixel_values=UpperCAmelCase__, **UpperCAmelCase__ ) with self.subTest("JIT Enabled" ): __lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple() self.assertEqual(len(UpperCAmelCase__ ), len(UpperCAmelCase__ ) ) for jitted_output, output in zip(UpperCAmelCase__, UpperCAmelCase__ ): self.assertEqual(jitted_output.shape, output.shape ) def _lowercase ( self : List[str] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def _lowercase ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def _lowercase ( self : Tuple ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ ) @slow def _lowercase ( self : Union[str, Any] ): for model_class_name in self.all_model_classes: __lowercase = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" ) __lowercase = model(np.ones((1, 3, 2_2_4, 2_2_4) ) ) self.assertIsNotNone(UpperCAmelCase__ ) def _A ( ) -> str: '''simple docstring''' __lowercase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_vision @require_flax class _lowerCAmelCase ( unittest.TestCase ): """simple 
docstring""" @cached_property def _lowercase ( self : Optional[int] ): return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def _lowercase ( self : Any ): __lowercase = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" ).pixel_values # prepare bool_masked_pos __lowercase = np.ones((1, 1_9_6), dtype=UpperCAmelCase__ ) # forward pass __lowercase = model(pixel_values=UpperCAmelCase__, bool_masked_pos=UpperCAmelCase__ ) __lowercase = outputs.logits # verify the logits __lowercase = (1, 1_9_6, 8_1_9_2) self.assertEqual(logits.shape, UpperCAmelCase__ ) __lowercase = np.array( [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ) self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], UpperCAmelCase__, atol=1E-2 ) ) @slow def _lowercase ( self : Any ): __lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" ) # forward pass __lowercase = model(**UpperCAmelCase__ ) __lowercase = outputs.logits # verify the logits __lowercase = (1, 1_0_0_0) self.assertEqual(logits.shape, UpperCAmelCase__ ) __lowercase = np.array([-1.2_385, -1.0_987, -1.0_108] ) self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) ) __lowercase = 2_8_1 self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ ) @slow def _lowercase ( self : List[str] ): __lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" ) # forward pass __lowercase = model(**UpperCAmelCase__ ) __lowercase = outputs.logits # verify the logits __lowercase = (1, 2_1_8_4_1) self.assertEqual(logits.shape, UpperCAmelCase__ ) __lowercase = np.array([1.6_881, -0.2_787, 0.5_901] ) self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) ) __lowercase = 2_3_9_6 self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ )
17
1
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all n-grams of length ``ngram_size`` from ``sentence``."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
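# Added usage sketch (the example string is my own, not from the original file):
# the character 2-grams of "abcde" are the four adjacent pairs.
assert create_ngram("abcde", 2) == ["ab", "bc", "cd", "de"]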
362
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig UpperCamelCase = logging.get_logger(__name__) # General docstring UpperCamelCase = 'PoolFormerConfig' # Base docstring UpperCamelCase = 'sail/poolformer_s12' UpperCamelCase = [1, 512, 7, 7] # Image classification docstring UpperCamelCase = 'sail/poolformer_s12' UpperCamelCase = 'tabby, tabby cat' UpperCamelCase = [ 'sail/poolformer_s12', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def _A ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : bool = False ): """simple docstring""" if drop_prob == 0.0 or not training: return input lowerCAmelCase__ = 1 - drop_prob lowerCAmelCase__ = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets lowerCAmelCase__ = keep_prob + torch.rand(lowerCAmelCase_ , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize lowerCAmelCase__ = input.div(lowerCAmelCase_ ) * random_tensor return output class __lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[float] = None ) -> None: super().__init__() lowerCAmelCase__ = drop_prob def a ( self : str , SCREAMING_SNAKE_CASE__ : torch.Tensor ) -> torch.Tensor: return drop_path(SCREAMING_SNAKE_CASE__ , self.drop_prob , self.training ) def a ( self : Optional[Any] ) -> str: return "p={}".format(self.drop_prob ) class __lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str=None ) -> Optional[Any]: super().__init__() lowerCAmelCase__ = patch_size if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (patch_size, patch_size) lowerCAmelCase__ = stride if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (stride, stride) lowerCAmelCase__ = padding if isinstance(SCREAMING_SNAKE_CASE__ , collections.abc.Iterable ) else (padding, padding) lowerCAmelCase__ = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = norm_layer(SCREAMING_SNAKE_CASE__ ) if norm_layer else nn.Identity() def a ( self : int , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]: lowerCAmelCase__ = self.projection(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = self.norm(SCREAMING_SNAKE_CASE__ ) return embeddings class __lowerCamelCase ( nn.GroupNorm ): """simple docstring""" def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Any ) -> Dict: super().__init__(1 , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) class __lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int ) -> List[Any]: super().__init__() lowerCAmelCase__ = 
nn.AvgPoolad(SCREAMING_SNAKE_CASE__ , stride=1 , padding=pool_size // 2 , count_include_pad=SCREAMING_SNAKE_CASE__ ) def a ( self : int , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]: return self.pool(SCREAMING_SNAKE_CASE__ ) - hidden_states class __lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple ) -> Dict: super().__init__() lowerCAmelCase__ = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 ) lowerCAmelCase__ = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 ) lowerCAmelCase__ = PoolFormerDropPath(SCREAMING_SNAKE_CASE__ ) if isinstance(config.hidden_act , SCREAMING_SNAKE_CASE__ ): lowerCAmelCase__ = ACTaFN[config.hidden_act] else: lowerCAmelCase__ = config.hidden_act def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]: lowerCAmelCase__ = self.conva(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = self.act_fn(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = self.drop(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = self.conva(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = self.drop(SCREAMING_SNAKE_CASE__ ) return hidden_states class __lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]: super().__init__() lowerCAmelCase__ = PoolFormerPooling(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = PoolFormerOutput(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = PoolFormerGroupNorm(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = PoolFormerGroupNorm(SCREAMING_SNAKE_CASE__ ) # Useful for training neural nets lowerCAmelCase__ = PoolFormerDropPath(SCREAMING_SNAKE_CASE__ ) if drop_path > 0.0 else nn.Identity() lowerCAmelCase__ = config.use_layer_scale if config.use_layer_scale: lowerCAmelCase__ = nn.Parameter( config.layer_scale_init_value * torch.ones((SCREAMING_SNAKE_CASE__) ) , requires_grad=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = nn.Parameter( config.layer_scale_init_value * torch.ones((SCREAMING_SNAKE_CASE__) ) , requires_grad=SCREAMING_SNAKE_CASE__ ) def a ( self : Dict , SCREAMING_SNAKE_CASE__ : str ) -> int: if self.use_layer_scale: lowerCAmelCase__ = self.pooling(self.before_norm(SCREAMING_SNAKE_CASE__ ) ) lowerCAmelCase__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection lowerCAmelCase__ = hidden_states + self.drop_path(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = () lowerCAmelCase__ = self.output(self.after_norm(SCREAMING_SNAKE_CASE__ ) ) lowerCAmelCase__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection lowerCAmelCase__ = hidden_states + self.drop_path(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = (output,) + outputs return outputs else: lowerCAmelCase__ = self.drop_path(self.pooling(self.before_norm(SCREAMING_SNAKE_CASE__ ) ) ) # First residual connection lowerCAmelCase__ = pooling_output + hidden_states lowerCAmelCase__ = () # Second residual connection inside the PoolFormerOutput block lowerCAmelCase__ = self.drop_path(self.output(self.after_norm(SCREAMING_SNAKE_CASE__ ) ) ) lowerCAmelCase__ = hidden_states + layer_output lowerCAmelCase__ = (output,) + outputs 
return outputs class __lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> Any: super().__init__() lowerCAmelCase__ = config # stochastic depth decay rule lowerCAmelCase__ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings lowerCAmelCase__ = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) lowerCAmelCase__ = nn.ModuleList(SCREAMING_SNAKE_CASE__ ) # Transformer blocks lowerCAmelCase__ = [] lowerCAmelCase__ = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers lowerCAmelCase__ = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( SCREAMING_SNAKE_CASE__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(SCREAMING_SNAKE_CASE__ ) ) lowerCAmelCase__ = nn.ModuleList(SCREAMING_SNAKE_CASE__ ) def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : List[str]=True ) -> Dict: lowerCAmelCase__ = () if output_hidden_states else None lowerCAmelCase__ = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): lowerCAmelCase__ , lowerCAmelCase__ = layers # Get patch embeddings from hidden_states lowerCAmelCase__ = embedding_layer(SCREAMING_SNAKE_CASE__ ) # Send the embeddings through the blocks for _, blk in enumerate(SCREAMING_SNAKE_CASE__ ): lowerCAmelCase__ = blk(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = layer_outputs[0] if output_hidden_states: lowerCAmelCase__ = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=SCREAMING_SNAKE_CASE__ ) class __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" snake_case__ = PoolFormerConfig snake_case__ = "poolformer" snake_case__ = "pixel_values" snake_case__ = True def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ) -> List[Any]: if isinstance(SCREAMING_SNAKE_CASE__ , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(SCREAMING_SNAKE_CASE__ , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def a ( self : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ) -> Tuple: if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowerCAmelCase__ = value UpperCamelCase = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' UpperCamelCase = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n' @add_start_docstrings( "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , UpperCamelCase__ , ) class __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]: super().__init__(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = config lowerCAmelCase__ = PoolFormerEncoder(SCREAMING_SNAKE_CASE__ ) # Initialize weights and apply final processing self.post_init() def a ( self : Optional[int] ) -> Optional[Any]: return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]: lowerCAmelCase__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values" ) lowerCAmelCase__ = self.encoder( SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , ) lowerCAmelCase__ = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=encoder_outputs.hidden_states , ) class __lowerCamelCase ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[str]: super().__init__() lowerCAmelCase__ = nn.Linear(config.hidden_size , config.hidden_size ) def a ( self : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> int: lowerCAmelCase__ = self.dense(SCREAMING_SNAKE_CASE__ ) return output @add_start_docstrings( "\n PoolFormer Model transformer with an image classification head on top\n " , UpperCamelCase__ , ) class __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]: super().__init__(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = config.num_labels lowerCAmelCase__ = PoolFormerModel(SCREAMING_SNAKE_CASE__ ) # Final norm lowerCAmelCase__ = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head lowerCAmelCase__ = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : 
Optional[torch.LongTensor] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]: lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase__ = self.poolformer( SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , ) lowerCAmelCase__ = outputs[0] lowerCAmelCase__ = self.classifier(self.norm(SCREAMING_SNAKE_CASE__ ).mean([-2, -1] ) ) lowerCAmelCase__ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowerCAmelCase__ = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowerCAmelCase__ = "single_label_classification" else: lowerCAmelCase__ = "multi_label_classification" if self.config.problem_type == "regression": lowerCAmelCase__ = MSELoss() if self.num_labels == 1: lowerCAmelCase__ = loss_fct(logits.squeeze() , labels.squeeze() ) else: lowerCAmelCase__ = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) elif self.config.problem_type == "single_label_classification": lowerCAmelCase__ = CrossEntropyLoss() lowerCAmelCase__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowerCAmelCase__ = BCEWithLogitsLoss() lowerCAmelCase__ = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if not return_dict: lowerCAmelCase__ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=SCREAMING_SNAKE_CASE__ , logits=SCREAMING_SNAKE_CASE__ , hidden_states=outputs.hidden_states )
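# Added standalone sketch (my own addition, independent of the file above) of the
# stochastic-depth ("drop path") trick that the PoolFormer code implements: during
# training, whole residual branches are zeroed per sample and the survivors are
# rescaled by 1 / keep_prob so the expected activation is unchanged.
import torch


def drop_path_sketch(x: torch.Tensor, drop_prob: float = 0.1, training: bool = True) -> torch.Tensor:
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    # one Bernoulli draw per sample, broadcast over all remaining dimensions
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
    random_tensor.floor_()  # binarize: 1 with probability keep_prob, else 0
    return x.div(keep_prob) * random_tensor


# e.g. with drop_prob=0.5, roughly half the rows come back as zeros, the rest doubled
print(drop_path_sketch(torch.ones(4, 2), drop_prob=0.5))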
221
0
"""simple docstring""" from collections import Counter from timeit import timeit def a_ ( _lowerCAmelCase : str = "" , ): '''simple docstring''' return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2 def a_ ( _lowerCAmelCase : str = "" ): '''simple docstring''' if len(_lowerCAmelCase ) == 0: return True lowercase__ : int = input_str.replace(' ' , '' ).lower() # character_freq_dict: Stores the frequency of every character in the input string lowercase__ : dict[str, int] = {} for character in lower_case_input_str: lowercase__ : Optional[Any] = character_freq_dict.get(_lowerCAmelCase , 0 ) + 1 lowercase__ : Any = 0 for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 if odd_char > 1: return False return True def a_ ( _lowerCAmelCase : str = "" ): '''simple docstring''' print('\nFor string = ' , _lowerCAmelCase , ':' ) print( '> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_lowerCAmelCase ) , '\ttime =' , timeit( 'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , ) print( '> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_lowerCAmelCase ) , '\ttime =' , timeit( 'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , ) if __name__ == "__main__": _UpperCamelCase : int = input( "Enter string to determine if it can be rearranged as a palindrome or not: " ).strip() benchmark(check_str) _UpperCamelCase : List[Any] = can_string_be_rearranged_as_palindrome_counter(check_str) print(f'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
77
"""simple docstring""" import argparse import json from tqdm import tqdm def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--src_path' , type=__snake_case , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , ) parser.add_argument( '--evaluation_set' , type=__snake_case , help='where to store parsed evaluation_set file' , ) parser.add_argument( '--gold_data_path' , type=__snake_case , help='where to store parsed gold_data_path file' , ) lowercase = parser.parse_args() with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open( args.gold_data_path , 'w' ) as gold_file: lowercase = json.load(__snake_case ) for dpr_record in tqdm(__snake_case ): lowercase = dpr_record['question'] lowercase = [context['title'] for context in dpr_record['positive_ctxs']] eval_file.write(question + '\n' ) gold_file.write('\t'.join(__snake_case ) + '\n' ) if __name__ == "__main__": main()
220
0
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Optional[Any]: '''simple docstring''' a : Optional[int] = TapasConfig.from_json_file(A_ ) # set absolute/relative position embeddings parameter a : str = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": a : Dict = TapasForQuestionAnswering(config=A_ ) elif task == "WTQ": # run_task_main.py hparams a : Any = 4 a : Dict = True # hparam_utils.py hparams a : str = 0.6_6_4_6_9_4 a : Optional[int] = 0.2_0_7_9_5_1 a : Optional[Any] = 0.1_2_1_1_9_4 a : Union[str, Any] = True a : int = True a : Tuple = False a : Dict = 0.0_3_5_2_5_1_3 a : List[str] = TapasForQuestionAnswering(config=A_ ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams a : Union[str, Any] = 4 a : List[Any] = False # hparam_utils.py hparams a : Dict = 3_6.4_5_1_9 a : List[str] = 0.9_0_3_4_2_1 a : Optional[Any] = 2_2_2.0_8_8 a : Dict = True a : Union[str, Any] = True a : List[str] = True a : List[str] = 0.7_6_3_1_4_1 a : Any = TapasForQuestionAnswering(config=A_ ) elif task == "TABFACT": a : int = TapasForSequenceClassification(config=A_ ) elif task == "MLM": a : int = TapasForMaskedLM(config=A_ ) elif task == "INTERMEDIATE_PRETRAINING": a : List[Any] = TapasModel(config=A_ ) else: raise ValueError(F'''Task {task} not supported.''' ) print(F'''Building PyTorch model from configuration: {config}''' ) # Load weights from tf checkpoint load_tf_weights_in_tapas(A_ , A_ , A_ ) # Save pytorch-model (weights and configuration) print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(A_ ) # Save tokenizer files print(F'''Save tokenizer files to {pytorch_dump_path}''' ) a : Optional[int] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 ) tokenizer.save_pretrained(A_ ) print("Used relative position embeddings:" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA.""" ) parser.add_argument( """--reset_position_index_per_cell""", default=False, action="""store_true""", help="""Whether to use relative position embeddings or not. Defaults to True.""", ) parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--tapas_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained TAPAS model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) __lowercase = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
226
"""simple docstring""" from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING __lowercase = logging.get_logger(__name__) @add_end_docstrings(_a ) class _A ( _a ): """simple docstring""" def __init__( self : List[Any] , **__UpperCAmelCase : List[Any]): super().__init__(**__UpperCAmelCase) if self.framework == "tf": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''') requires_backends(self , "vision") self.check_model_type(__UpperCAmelCase) def __call__( self : str , __UpperCAmelCase : Union[str, "Image.Image", List[Dict[str, Any]]] , __UpperCAmelCase : Union[str, List[str]] = None , **__UpperCAmelCase : List[Any] , ): if "text_queries" in kwargs: a : List[Any] = kwargs.pop("text_queries") if isinstance(__UpperCAmelCase , (str, Image.Image)): a : Any = {"image": image, "candidate_labels": candidate_labels} else: a : Optional[int] = image a : Optional[int] = super().__call__(__UpperCAmelCase , **__UpperCAmelCase) return results def __snake_case ( self : Optional[int] , **__UpperCAmelCase : List[Any]): a : str = {} if "threshold" in kwargs: a : Dict = kwargs["threshold"] if "top_k" in kwargs: a : str = kwargs["top_k"] return {}, {}, postprocess_params def __snake_case ( self : List[Any] , __UpperCAmelCase : Optional[Any]): a : Union[str, Any] = load_image(inputs["image"]) a : Any = inputs["candidate_labels"] if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : Optional[Any] = candidate_labels.split(",") a : Union[str, Any] = torch.tensor([[image.height, image.width]] , dtype=torch.intaa) for i, candidate_label in enumerate(__UpperCAmelCase): a : int = self.tokenizer(__UpperCAmelCase , return_tensors=self.framework) a : int = self.image_processor(__UpperCAmelCase , return_tensors=self.framework) yield { "is_last": i == len(__UpperCAmelCase) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def __snake_case ( self : Dict , __UpperCAmelCase : Optional[int]): a : List[Any] = model_inputs.pop("target_size") a : Optional[int] = model_inputs.pop("candidate_label") a : List[Any] = model_inputs.pop("is_last") a : List[Any] = self.model(**__UpperCAmelCase) a : Union[str, Any] = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs} return model_outputs def __snake_case ( self : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : List[str]=None): a : Dict = [] for model_output in model_outputs: a : int = model_output["candidate_label"] a : Any = BaseModelOutput(__UpperCAmelCase) a : Optional[Any] = self.image_processor.post_process_object_detection( outputs=__UpperCAmelCase , threshold=__UpperCAmelCase , target_sizes=model_output["target_size"])[0] for index in outputs["scores"].nonzero(): a : Any = outputs["scores"][index].item() a : str = self._get_bounding_box(outputs["boxes"][index][0]) a : Optional[Any] = {"score": score, "label": label, "box": box} results.append(__UpperCAmelCase) a : str = sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase: x["score"] , reverse=__UpperCAmelCase) if top_k: a : Union[str, Any] = results[:top_k] return 
results def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : "torch.Tensor"): if self.framework != "pt": raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.") a , a , a , a : List[Any] = box.int().tolist() a : str = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
226
1
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
118
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape (k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
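# Added self-contained usage sketch (my own addition): applying the filter to a small
# synthetic ramp image, so no image file or GUI window is needed.
import numpy as np

_demo_image = np.arange(49, dtype=np.float64).reshape(7, 7) * 5
_smoothed = gaussian_filter(_demo_image, 3, sigma=1)
assert _smoothed.shape == (5, 5)  # each side shrinks by k_size - 1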
118
1
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Apply the electrical impedance formula Z = sqrt(R**2 + X**2): exactly one of the
    three arguments must be 0, and that quantity is solved for.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
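# Added usage sketch (values are my own): the 3-4-5 right triangle of R, X and Z.
assert electrical_impedance(3, 4, 0) == {"impedance": 5.0}
assert electrical_impedance(0, 4, 5) == {"resistance": 3.0}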
342
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def _UpperCamelCase ( snake_case__ ) -> int: __UpperCAmelCase : int = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: __UpperCAmelCase : int = [144, 192, 240] __UpperCAmelCase : Optional[Any] = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: __UpperCAmelCase : Optional[Any] = [96, 120, 144] __UpperCAmelCase : Tuple = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: __UpperCAmelCase : str = [64, 80, 96] __UpperCAmelCase : Optional[Any] = [16, 16, 24, 48, 64, 80, 320] __UpperCAmelCase : Tuple = 0.05 __UpperCAmelCase : Dict = 2.0 if mobilevit_name.startswith("deeplabv3_" ): __UpperCAmelCase : str = 512 __UpperCAmelCase : Any = 16 __UpperCAmelCase : str = 21 __UpperCAmelCase : Union[str, Any] = "pascal-voc-id2label.json" else: __UpperCAmelCase : Optional[Any] = 1000 __UpperCAmelCase : int = "imagenet-1k-id2label.json" __UpperCAmelCase : Dict = "huggingface/label-files" __UpperCAmelCase : int = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) ) __UpperCAmelCase : Any = {int(snake_case__ ): v for k, v in idalabel.items()} __UpperCAmelCase : int = idalabel __UpperCAmelCase : List[str] = {v: k for k, v in idalabel.items()} return config def _UpperCamelCase ( snake_case__, snake_case__=False ) -> Tuple: for i in range(1, 6 ): if f'''layer_{i}.''' in name: __UpperCAmelCase : Tuple = name.replace(f'''layer_{i}.''', f'''encoder.layer.{i - 1}.''' ) if "conv_1." in name: __UpperCAmelCase : Dict = name.replace("conv_1.", "conv_stem." ) if ".block." in name: __UpperCAmelCase : Optional[int] = name.replace(".block.", "." ) if "exp_1x1" in name: __UpperCAmelCase : Tuple = name.replace("exp_1x1", "expand_1x1" ) if "red_1x1" in name: __UpperCAmelCase : Optional[Any] = name.replace("red_1x1", "reduce_1x1" ) if ".local_rep.conv_3x3." in name: __UpperCAmelCase : Optional[int] = name.replace(".local_rep.conv_3x3.", ".conv_kxk." ) if ".local_rep.conv_1x1." in name: __UpperCAmelCase : Any = name.replace(".local_rep.conv_1x1.", ".conv_1x1." ) if ".norm." in name: __UpperCAmelCase : Dict = name.replace(".norm.", ".normalization." ) if ".conv." in name: __UpperCAmelCase : List[Any] = name.replace(".conv.", ".convolution." ) if ".conv_proj." in name: __UpperCAmelCase : List[str] = name.replace(".conv_proj.", ".conv_projection." 
) for i in range(0, 2 ): for j in range(0, 4 ): if f'''.{i}.{j}.''' in name: __UpperCAmelCase : List[Any] = name.replace(f'''.{i}.{j}.''', f'''.{i}.layer.{j}.''' ) for i in range(2, 6 ): for j in range(0, 4 ): if f'''.{i}.{j}.''' in name: __UpperCAmelCase : Any = name.replace(f'''.{i}.{j}.''', f'''.{i}.''' ) if "expand_1x1" in name: __UpperCAmelCase : Optional[int] = name.replace("expand_1x1", "downsampling_layer.expand_1x1" ) if "conv_3x3" in name: __UpperCAmelCase : List[Any] = name.replace("conv_3x3", "downsampling_layer.conv_3x3" ) if "reduce_1x1" in name: __UpperCAmelCase : Dict = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1" ) for i in range(2, 5 ): if f'''.global_rep.{i}.weight''' in name: __UpperCAmelCase : Any = name.replace(f'''.global_rep.{i}.weight''', ".layernorm.weight" ) if f'''.global_rep.{i}.bias''' in name: __UpperCAmelCase : Optional[Any] = name.replace(f'''.global_rep.{i}.bias''', ".layernorm.bias" ) if ".global_rep." in name: __UpperCAmelCase : Tuple = name.replace(".global_rep.", ".transformer." ) if ".pre_norm_mha.0." in name: __UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_mha.0.", ".layernorm_before." ) if ".pre_norm_mha.1.out_proj." in name: __UpperCAmelCase : Tuple = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense." ) if ".pre_norm_ffn.0." in name: __UpperCAmelCase : Optional[Any] = name.replace(".pre_norm_ffn.0.", ".layernorm_after." ) if ".pre_norm_ffn.1." in name: __UpperCAmelCase : Dict = name.replace(".pre_norm_ffn.1.", ".intermediate.dense." ) if ".pre_norm_ffn.4." in name: __UpperCAmelCase : int = name.replace(".pre_norm_ffn.4.", ".output.dense." ) if ".transformer." in name: __UpperCAmelCase : Tuple = name.replace(".transformer.", ".transformer.layer." ) if ".aspp_layer." in name: __UpperCAmelCase : Any = name.replace(".aspp_layer.", "." ) if ".aspp_pool." in name: __UpperCAmelCase : Optional[Any] = name.replace(".aspp_pool.", "." ) if "seg_head." in name: __UpperCAmelCase : Optional[int] = name.replace("seg_head.", "segmentation_head." ) if "segmentation_head.classifier.classifier." in name: __UpperCAmelCase : str = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier." ) if "classifier.fc." in name: __UpperCAmelCase : Optional[Any] = name.replace("classifier.fc.", "classifier." ) elif (not base_model) and ("segmentation_head." not in name): __UpperCAmelCase : List[str] = "mobilevit." + name return name def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=False ) -> Union[str, Any]: if base_model: __UpperCAmelCase : Optional[int] = "" else: __UpperCAmelCase : Tuple = "mobilevit." for key in orig_state_dict.copy().keys(): __UpperCAmelCase : Optional[int] = orig_state_dict.pop(snake_case__ ) if key[:8] == "encoder.": __UpperCAmelCase : str = key[8:] if "qkv" in key: __UpperCAmelCase : Tuple = key.split("." 
) __UpperCAmelCase : List[Any] = int(key_split[0][6:] ) - 1 __UpperCAmelCase : Optional[Any] = int(key_split[3] ) __UpperCAmelCase : Tuple = model.get_submodule(f'''{model_prefix}encoder.layer.{layer_num}''' ) __UpperCAmelCase : List[str] = layer.transformer.layer[transformer_num].attention.attention.all_head_size __UpperCAmelCase : Optional[Any] = ( f'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.''' ) if "weight" in key: __UpperCAmelCase : Any = val[:dim, :] __UpperCAmelCase : Any = val[dim : dim * 2, :] __UpperCAmelCase : List[Any] = val[-dim:, :] else: __UpperCAmelCase : List[str] = val[:dim] __UpperCAmelCase : Optional[Any] = val[dim : dim * 2] __UpperCAmelCase : List[Any] = val[-dim:] else: __UpperCAmelCase : str = val return orig_state_dict def _UpperCamelCase ( ) -> Any: __UpperCAmelCase : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg" __UpperCAmelCase : List[str] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ) return im @torch.no_grad() def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__=False ) -> Optional[Any]: __UpperCAmelCase : Tuple = get_mobilevit_config(snake_case__ ) # load original state_dict __UpperCAmelCase : str = torch.load(snake_case__, map_location="cpu" ) # load 🤗 model if mobilevit_name.startswith("deeplabv3_" ): __UpperCAmelCase : Optional[int] = MobileViTForSemanticSegmentation(snake_case__ ).eval() else: __UpperCAmelCase : List[Any] = MobileViTForImageClassification(snake_case__ ).eval() __UpperCAmelCase : Dict = convert_state_dict(snake_case__, snake_case__ ) model.load_state_dict(snake_case__ ) # Check outputs on an image, prepared by MobileViTImageProcessor __UpperCAmelCase : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32 ) __UpperCAmelCase : Any = image_processor(images=prepare_img(), return_tensors="pt" ) __UpperCAmelCase : Dict = model(**snake_case__ ) __UpperCAmelCase : Tuple = outputs.logits if mobilevit_name.startswith("deeplabv3_" ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": __UpperCAmelCase : int = torch.tensor( [ [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]], [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]], [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": __UpperCAmelCase : Tuple = torch.tensor( [ [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]], [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]], [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": __UpperCAmelCase : Any = torch.tensor( [ [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]], [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]], [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]], ] ) else: raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' ) assert torch.allclose(logits[0, :3, :3, :3], snake_case__, atol=1e-4 ) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": __UpperCAmelCase : str = torch.tensor([-0.9866, 0.2392, -1.1241] ) elif mobilevit_name == "mobilevit_xs": __UpperCAmelCase : Tuple = torch.tensor([-2.4761, 
-0.9399, -1.9587] ) elif mobilevit_name == "mobilevit_xxs": __UpperCAmelCase : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] ) else: raise ValueError(f'''Unknown mobilevit_name: {mobilevit_name}''' ) assert torch.allclose(logits[0, :3], snake_case__, atol=1e-4 ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) print(f'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case__ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(snake_case__ ) if push_to_hub: __UpperCAmelCase : List[str] = { "mobilevit_s": "mobilevit-small", "mobilevit_xs": "mobilevit-x-small", "mobilevit_xxs": "mobilevit-xx-small", "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small", "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small", "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small", } print("Pushing to the hub..." ) __UpperCAmelCase : int = model_mapping[mobilevit_name] image_processor.push_to_hub(snake_case__, organization="apple" ) model.push_to_hub(snake_case__, organization="apple" ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--mobilevit_name''', default='''mobilevit_s''', type=str, help=( '''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',''' ''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.''' ), ) parser.add_argument( '''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _snake_case = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
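# Example invocation (not part of the original file); the script filename and all
# paths below are placeholders:
#   python convert_mobilevit_original_to_pytorch.py --mobilevit_name mobilevit_s \
#       --checkpoint_path /path/to/mobilevit_s.pt --pytorch_dump_folder_path ./mobilevit-small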
342
1
"""Image processor class for Swin2SR."""

from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
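# Worked padding example (not part of the original file): `pad` rounds each spatial
# dimension up using (dim // size + 1) * size, so it always adds at least one row and
# one column -- even when the input is already a multiple of pad_size.
pad_size = 8
old_height, old_width = 250, 301
pad_height = (old_height // pad_size + 1) * pad_size - old_height  # 6
pad_width = (old_width // pad_size + 1) * pad_size - old_width     # 3
print(old_height + pad_height, old_width + pad_width)  # 256 304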
34
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """
    Transforms a given snake_case string to camelCase
    (or PascalCase if indicated).
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
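# Illustrative calls (not part of the original file); behaviour follows directly
# from the function above.
print(snake_to_camel_case("some_random_string"))                   # someRandomString
print(snake_to_camel_case("some_random_string", use_pascal=True))  # SomeRandomString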
11
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''', # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class __SCREAMING_SNAKE_CASE ( UpperCamelCase ): snake_case_ = 'gpt_neox' def __init__( self : Union[str, Any] , snake_case : List[str]=5_0432 , snake_case : int=6144 , snake_case : List[Any]=44 , snake_case : str=64 , snake_case : Optional[int]=2_4576 , snake_case : List[Any]="gelu" , snake_case : Optional[Any]=0.25 , snake_case : Optional[int]=1_0000 , snake_case : Union[str, Any]=0.0 , snake_case : str=0.0 , snake_case : Tuple=0.1 , snake_case : int=2048 , snake_case : Dict=0.02 , snake_case : Optional[int]=1e-5 , snake_case : Any=True , snake_case : int=0 , snake_case : str=2 , snake_case : Tuple=False , snake_case : Union[str, Any]=True , snake_case : List[Any]=None , **snake_case : List[str] , ): '''simple docstring''' super().__init__(bos_token_id=snake_case , eos_token_id=snake_case , **snake_case ) A__ : int = vocab_size A__ : Any = max_position_embeddings A__ : int = hidden_size A__ : int = num_hidden_layers A__ : Tuple = num_attention_heads A__ : Any = intermediate_size A__ : List[Any] = hidden_act A__ : List[Any] = rotary_pct A__ : Dict = rotary_emb_base A__ : Tuple = attention_dropout A__ : Optional[Any] = hidden_dropout A__ : Tuple = classifier_dropout A__ : List[Any] = initializer_range A__ : Union[str, Any] = layer_norm_eps A__ : Union[str, Any] = use_cache A__ : int = tie_word_embeddings A__ : List[str] = use_parallel_residual A__ : Optional[Any] = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( """The hidden size is not divisble by the number of attention heads! Make sure to update them!""" ) def _UpperCamelCase ( self : str ): '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling , snake_case ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ F'got {self.rope_scaling}' ) A__ : List[Any] = self.rope_scaling.get("""type""" , snake_case ) A__ : Union[str, Any] = self.rope_scaling.get("""factor""" , snake_case ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' ) if rope_scaling_factor is None or not isinstance(snake_case , snake_case ) or rope_scaling_factor <= 1.0: raise ValueError(F'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
370
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() A_ = logging.get_logger(__name__) def _lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Tuple=False ) ->str: A__ : Optional[int] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias') ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """deit.embeddings.cls_token"""), ("""dist_token""", """deit.embeddings.distillation_token"""), ("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """deit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" A__ : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("""norm.weight""", """deit.layernorm.weight"""), ("""norm.bias""", """deit.layernorm.bias"""), ("""head.weight""", """cls_classifier.weight"""), ("""head.bias""", """cls_classifier.bias"""), ("""head_dist.weight""", """distillation_classifier.weight"""), ("""head_dist.bias""", """distillation_classifier.bias"""), ] ) return rename_keys def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[Any]=False ) ->str: for i in range(config.num_hidden_layers ): if base_model: A__ : Any = """""" else: A__ : Tuple = """deit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A__ : Any = state_dict.pop(f'blocks.{i}.attn.qkv.weight' ) A__ : Tuple = state_dict.pop(f'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict A__ : List[Any] = in_proj_weight[ : config.hidden_size, : ] A__ : str = in_proj_bias[: config.hidden_size] A__ : 
Any = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A__ : Dict = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A__ : Optional[Any] = in_proj_weight[ -config.hidden_size :, : ] A__ : Any = in_proj_bias[-config.hidden_size :] def _lowerCAmelCase ( UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Union[str, Any] ) ->Any: A__ : int = dct.pop(UpperCAmelCase__ ) A__ : Tuple = val def _lowerCAmelCase ( ) ->List[Any]: A__ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg""" A__ : int = Image.open(requests.get(UpperCAmelCase__, stream=UpperCAmelCase__ ).raw ) return im @torch.no_grad() def _lowerCAmelCase ( UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any ) ->Tuple: A__ : List[Any] = DeiTConfig() # all deit models have fine-tuned heads A__ : Tuple = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size A__ : str = 1_0_0_0 A__ : List[str] = """huggingface/label-files""" A__ : Dict = """imagenet-1k-id2label.json""" A__ : List[str] = json.load(open(hf_hub_download(UpperCAmelCase__, UpperCAmelCase__, repo_type="""dataset""" ), """r""" ) ) A__ : Dict = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()} A__ : Optional[int] = idalabel A__ : Dict = {v: k for k, v in idalabel.items()} A__ : List[str] = int(deit_name[-6:-4] ) A__ : str = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("""tiny""" ): A__ : List[str] = 1_9_2 A__ : int = 7_6_8 A__ : List[Any] = 1_2 A__ : Dict = 3 elif deit_name[9:].startswith("""small""" ): A__ : List[Any] = 3_8_4 A__ : List[str] = 1_5_3_6 A__ : Any = 1_2 A__ : Union[str, Any] = 6 if deit_name[9:].startswith("""base""" ): pass elif deit_name[4:].startswith("""large""" ): A__ : int = 1_0_2_4 A__ : str = 4_0_9_6 A__ : Any = 2_4 A__ : int = 1_6 # load original model from timm A__ : Dict = timm.create_model(UpperCAmelCase__, pretrained=UpperCAmelCase__ ) timm_model.eval() # load state_dict of original model, remove and rename some keys A__ : Tuple = timm_model.state_dict() A__ : str = create_rename_keys(UpperCAmelCase__, UpperCAmelCase__ ) for src, dest in rename_keys: rename_key(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) read_in_q_k_v(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) # load HuggingFace model A__ : str = DeiTForImageClassificationWithTeacher(UpperCAmelCase__ ).eval() model.load_state_dict(UpperCAmelCase__ ) # Check outputs on an image, prepared by DeiTImageProcessor A__ : int = int( (2_5_6 / 2_2_4) * config.image_size ) # to maintain same ratio w.r.t. 
224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 A__ : Any = DeiTImageProcessor(size=UpperCAmelCase__, crop_size=config.image_size ) A__ : Union[str, Any] = image_processor(images=prepare_img(), return_tensors="""pt""" ) A__ : Optional[Any] = encoding["""pixel_values"""] A__ : Union[str, Any] = model(UpperCAmelCase__ ) A__ : Union[str, Any] = timm_model(UpperCAmelCase__ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(UpperCAmelCase__, outputs.logits, atol=1e-3 ) Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ ) print(f'Saving model {deit_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(UpperCAmelCase__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--deit_name''', default='''vit_deit_base_distilled_patch16_224''', type=str, help='''Name of the DeiT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) A_ = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
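# Example invocation (not part of the original file); the script filename and the
# output path are placeholders:
#   python convert_deit_timm_to_pytorch.py --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224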
296
0
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
283
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
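# Worked example (not part of the original file): in the first graph from main(),
# only vertices 1 and 5 have odd degree, so check_circuit_or_path returns check == 2
# and the graph has an Euler path but no Euler cycle.
g = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
print({v: len(adj) for v, adj in g.items()})  # {1: 3, 2: 2, 3: 2, 4: 2, 5: 1}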
215
0
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
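# Quick demonstration (not part of the original file) of the recursive insertion sort;
# sorting happens in place.
data = [5, 3, 1, 4, 2]
rec_insertion_sort(data, len(data))
print(data)  # [1, 2, 3, 4, 5]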
360
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")

    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
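# Extra worked cases (not part of the original file): "." matches any single character
# and "x*" matches zero or more "x", so "ab" matches ".*" but not the pattern "a".
print(match_pattern("ab", ".*"))  # True
print(match_pattern("ab", "a"))   # False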
217
0
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowerCamelCase (unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : str ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ = UNetaDModel( sample_size=(3_2, 6_4) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , ) return model @property def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ = UNetaDConditionModel( sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=1_0 , ) return model @property def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ = AutoencoderKL( sample_size=(1_2_8, 6_4) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , ) SCREAMING_SNAKE_CASE__ = UNetaDModel( sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , ) return vqvae, unet @slow def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ = """cpu""" # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__ = Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) SCREAMING_SNAKE_CASE__ = DDPMScheduler() SCREAMING_SNAKE_CASE__ = AudioDiffusionPipeline(vqvae=__UpperCAmelCase , unet=self.dummy_unet , mel=__UpperCAmelCase , scheduler=__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(4_2 ) SCREAMING_SNAKE_CASE__ = pipe(generator=__UpperCAmelCase , steps=4 ) SCREAMING_SNAKE_CASE__ = output.audios[0] SCREAMING_SNAKE_CASE__ = output.images[0] SCREAMING_SNAKE_CASE__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(4_2 ) SCREAMING_SNAKE_CASE__ = pipe(generator=__UpperCAmelCase , steps=4 , return_dict=__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) SCREAMING_SNAKE_CASE__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0] SCREAMING_SNAKE_CASE__ = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:1_0] SCREAMING_SNAKE_CASE__ = np.array([6_9, 
2_5_5, 2_5_5, 2_5_5, 0, 0, 7_7, 1_8_1, 1_2, 1_2_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 SCREAMING_SNAKE_CASE__ = Mel( x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) SCREAMING_SNAKE_CASE__ = DDIMScheduler() SCREAMING_SNAKE_CASE__ = self.dummy_vqvae_and_unet SCREAMING_SNAKE_CASE__ = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__UpperCAmelCase , scheduler=__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) np.random.seed(0 ) SCREAMING_SNAKE_CASE__ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) SCREAMING_SNAKE_CASE__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(4_2 ) SCREAMING_SNAKE_CASE__ = pipe(raw_audio=__UpperCAmelCase , generator=__UpperCAmelCase , start_step=5 , steps=1_0 ) SCREAMING_SNAKE_CASE__ = output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) SCREAMING_SNAKE_CASE__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0] SCREAMING_SNAKE_CASE__ = np.array([1_2_0, 1_1_7, 1_1_0, 1_0_9, 1_3_8, 1_6_7, 1_3_8, 1_4_8, 1_3_2, 1_2_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 SCREAMING_SNAKE_CASE__ = self.dummy_unet_condition SCREAMING_SNAKE_CASE__ = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=__UpperCAmelCase , mel=__UpperCAmelCase , scheduler=__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) np.random.seed(0 ) SCREAMING_SNAKE_CASE__ = torch.rand((1, 1, 1_0) ) SCREAMING_SNAKE_CASE__ = pipe(generator=__UpperCAmelCase , encoding=__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = output.images[0] SCREAMING_SNAKE_CASE__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0] SCREAMING_SNAKE_CASE__ = np.array([1_0_7, 1_0_3, 1_2_0, 1_2_7, 1_4_2, 1_2_2, 1_1_3, 1_2_2, 9_7, 1_1_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class lowerCamelCase (unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: SCREAMING_SNAKE_CASE__ = torch_device SCREAMING_SNAKE_CASE__ = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" ) SCREAMING_SNAKE_CASE__ = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(4_2 ) SCREAMING_SNAKE_CASE__ = pipe(generator=__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = output.audios[0] SCREAMING_SNAKE_CASE__ = output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] SCREAMING_SNAKE_CASE__ = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0] SCREAMING_SNAKE_CASE__ = np.array([1_5_1, 1_6_7, 1_5_4, 1_4_4, 1_2_2, 1_3_4, 1_2_1, 1_0_5, 7_0, 2_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
165
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available A_ : List[str] = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : List[Any] = ["YolosFeatureExtractor"] A_ : Optional[int] = ["YolosImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : str = [ "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST", "YolosForObjectDetection", "YolosModel", "YolosPreTrainedModel", ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys A_ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
165
1
import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0.999 , _SCREAMING_SNAKE_CASE="cosine" , ) -> int: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(_SCREAMING_SNAKE_CASE ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_SCREAMING_SNAKE_CASE ): return math.exp(t * -12.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) SCREAMING_SNAKE_CASE = [] for i in range(_SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE = i / num_diffusion_timesteps SCREAMING_SNAKE_CASE = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ) return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa ) class UpperCamelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __snake_case : Optional[Any] = [e.name for e in KarrasDiffusionSchedulers] __snake_case : Any = 2 @register_to_config def __init__( self : Any ,lowerCamelCase__ : int = 1000 ,lowerCamelCase__ : float = 0.00085 ,lowerCamelCase__ : float = 0.012 ,lowerCamelCase__ : str = "linear" ,lowerCamelCase__ : Optional[Union[np.ndarray, List[float]]] = None ,lowerCamelCase__ : str = "epsilon" ,lowerCamelCase__ : Optional[bool] = False ,lowerCamelCase__ : Optional[bool] = False ,lowerCamelCase__ : float = 1.0 ,lowerCamelCase__ : str = "linspace" ,lowerCamelCase__ : int = 0 ,) -> Tuple: '''simple docstring''' if trained_betas is not None: SCREAMING_SNAKE_CASE = torch.tensor(lowerCamelCase__ ,dtype=torch.floataa ) elif beta_schedule == "linear": SCREAMING_SNAKE_CASE = torch.linspace(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. SCREAMING_SNAKE_CASE = ( torch.linspace(beta_start**0.5 ,beta_end**0.5 ,lowerCamelCase__ ,dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule SCREAMING_SNAKE_CASE = betas_for_alpha_bar(lowerCamelCase__ ,alpha_transform_type="""cosine""" ) elif beta_schedule == "exp": SCREAMING_SNAKE_CASE = betas_for_alpha_bar(lowerCamelCase__ ,alpha_transform_type="""exp""" ) else: raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" ) SCREAMING_SNAKE_CASE = 1.0 - self.betas SCREAMING_SNAKE_CASE = torch.cumprod(self.alphas ,dim=0 ) # set all values self.set_timesteps(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = use_karras_sigmas def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Tuple=None ) -> str: '''simple docstring''' if schedule_timesteps is None: SCREAMING_SNAKE_CASE = self.timesteps SCREAMING_SNAKE_CASE = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: SCREAMING_SNAKE_CASE = 1 if len(lowerCamelCase__ ) > 1 else 0 else: SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(lowerCamelCase__ ) else timestep SCREAMING_SNAKE_CASE = self._index_counter[timestep_int] return indices[pos].item() @property def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]: '''simple docstring''' if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : torch.FloatTensor ,lowerCamelCase__ : Union[float, torch.FloatTensor] ,) -> torch.FloatTensor: '''simple docstring''' SCREAMING_SNAKE_CASE = self.index_for_timestep(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = self.sigmas[step_index] SCREAMING_SNAKE_CASE = sample / ((sigma**2 + 1) ** 0.5) return sample def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : int ,lowerCamelCase__ : Union[str, torch.device] = None ,lowerCamelCase__ : Optional[int] = None ,) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = num_inference_steps SCREAMING_SNAKE_CASE = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": SCREAMING_SNAKE_CASE = np.linspace(0 ,num_train_timesteps - 1 ,lowerCamelCase__ ,dtype=lowerCamelCase__ )[::-1].copy() elif self.config.timestep_spacing == "leading": SCREAMING_SNAKE_CASE = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 SCREAMING_SNAKE_CASE = (np.arange(0 ,lowerCamelCase__ ) * step_ratio).round()[::-1].copy().astype(lowerCamelCase__ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": SCREAMING_SNAKE_CASE = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 SCREAMING_SNAKE_CASE = (np.arange(lowerCamelCase__ ,0 ,-step_ratio )).round().copy().astype(lowerCamelCase__ ) timesteps -= 1 else: raise ValueError( F"""{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" ) SCREAMING_SNAKE_CASE = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) SCREAMING_SNAKE_CASE = np.log(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = np.interp(lowerCamelCase__ ,np.arange(0 ,len(lowerCamelCase__ ) ) ,lowerCamelCase__ ) if self.config.use_karras_sigmas: SCREAMING_SNAKE_CASE = self._convert_to_karras(in_sigmas=lowerCamelCase__ ,num_inference_steps=self.num_inference_steps ) SCREAMING_SNAKE_CASE = np.array([self._sigma_to_t(lowerCamelCase__ ,lowerCamelCase__ ) for sigma in sigmas] ) SCREAMING_SNAKE_CASE = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) SCREAMING_SNAKE_CASE = torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) SCREAMING_SNAKE_CASE = torch.from_numpy(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(lowerCamelCase__ ).startswith("""mps""" ): # mps does not support float64 SCREAMING_SNAKE_CASE = timesteps.to(lowerCamelCase__ ,dtype=torch.floataa ) else: SCREAMING_SNAKE_CASE = timesteps.to(device=lowerCamelCase__ ) # empty dt and derivative SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter SCREAMING_SNAKE_CASE = defaultdict(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : Dict ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = np.log(lowerCamelCase__ ) # get distribution SCREAMING_SNAKE_CASE = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range SCREAMING_SNAKE_CASE = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) SCREAMING_SNAKE_CASE = low_idx + 1 SCREAMING_SNAKE_CASE = log_sigmas[low_idx] SCREAMING_SNAKE_CASE = log_sigmas[high_idx] # interpolate sigmas SCREAMING_SNAKE_CASE = (low - log_sigma) / (low - high) SCREAMING_SNAKE_CASE = np.clip(lowerCamelCase__ ,0 ,1 ) # transform interpolation to time range SCREAMING_SNAKE_CASE = (1 - w) * low_idx + w * high_idx SCREAMING_SNAKE_CASE = t.reshape(sigma.shape ) return t def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : torch.FloatTensor ,lowerCamelCase__ : int ) -> torch.FloatTensor: '''simple docstring''' SCREAMING_SNAKE_CASE = in_sigmas[-1].item() SCREAMING_SNAKE_CASE = in_sigmas[0].item() SCREAMING_SNAKE_CASE = 7.0 # 7.0 is the value used in the paper SCREAMING_SNAKE_CASE = np.linspace(0 ,1 ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = sigma_min ** (1 / rho) SCREAMING_SNAKE_CASE = sigma_max ** (1 / rho) SCREAMING_SNAKE_CASE = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: '''simple docstring''' return self.dt is None def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : Union[torch.FloatTensor, np.ndarray] ,lowerCamelCase__ : Union[float, torch.FloatTensor] ,lowerCamelCase__ : Union[torch.FloatTensor, np.ndarray] ,lowerCamelCase__ : bool = True ,) -> Union[SchedulerOutput, Tuple]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.index_for_timestep(lowerCamelCase__ ) # advance index counter by 1 SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(lowerCamelCase__ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: SCREAMING_SNAKE_CASE = self.sigmas[step_index] 
SCREAMING_SNAKE_CASE = self.sigmas[step_index + 1] else: # 2nd order / Heun's method SCREAMING_SNAKE_CASE = self.sigmas[step_index - 1] SCREAMING_SNAKE_CASE = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": SCREAMING_SNAKE_CASE = sigma_hat if self.state_in_first_order else sigma_next SCREAMING_SNAKE_CASE = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": SCREAMING_SNAKE_CASE = sigma_hat if self.state_in_first_order else sigma_next SCREAMING_SNAKE_CASE = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": SCREAMING_SNAKE_CASE = model_output else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.config.clip_sample: SCREAMING_SNAKE_CASE = pred_original_sample.clamp( -self.config.clip_sample_range ,self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order SCREAMING_SNAKE_CASE = (sample - pred_original_sample) / sigma_hat # 3. delta timestep SCREAMING_SNAKE_CASE = sigma_next - sigma_hat # store for 2nd order step SCREAMING_SNAKE_CASE = derivative SCREAMING_SNAKE_CASE = dt SCREAMING_SNAKE_CASE = sample else: # 2. 2nd order / Heun's method SCREAMING_SNAKE_CASE = (sample - pred_original_sample) / sigma_next SCREAMING_SNAKE_CASE = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample SCREAMING_SNAKE_CASE = self.dt SCREAMING_SNAKE_CASE = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : torch.FloatTensor ,lowerCamelCase__ : torch.FloatTensor ,lowerCamelCase__ : torch.FloatTensor ,) -> torch.FloatTensor: '''simple docstring''' SCREAMING_SNAKE_CASE = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(lowerCamelCase__ ): # mps does not support float64 SCREAMING_SNAKE_CASE = self.timesteps.to(original_samples.device ,dtype=torch.floataa ) SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device ,dtype=torch.floataa ) else: SCREAMING_SNAKE_CASE = self.timesteps.to(original_samples.device ) SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device ) SCREAMING_SNAKE_CASE = [self.index_for_timestep(lowerCamelCase__ ,lowerCamelCase__ ) for t in timesteps] SCREAMING_SNAKE_CASE = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): SCREAMING_SNAKE_CASE = sigma.unsqueeze(-1 ) SCREAMING_SNAKE_CASE = original_samples + noise * sigma return noisy_samples def __len__( self : List[Any] ) -> int: '''simple docstring''' return self.config.num_train_timesteps
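# Hedged numeric sketch (not part of the original file) of the cosine schedule used by
# betas_for_alpha_bar at the top of this file: alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2
# and beta_i = min(1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), max_beta) for consecutive timesteps.
import math


def alpha_bar(t):
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2


print(1 - alpha_bar(1 / 10) / alpha_bar(0 / 10))  # first beta for 10 diffusion timesteps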
193
class Graph:
    def __init__(self) -> None:
        self.vertex = {}

    # for printing the Graph vertices
    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    # for adding the edge between two vertices
    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
193
1
"""Convert T5X (T5/LongT5) checkpoints from the original repository to a JAX/Flax model."""

import argparse

from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)

    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with"
            " `encoder_attention_type` attribute with a value from ['local', 'transient-global']."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"]["weight"] = (
                t5x_global_layer_norm
            )

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name][
            "global_relative_attention_bias"
        ]["embedding"] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
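# A hypothetical invocation of the converter above (the script filename and all
# paths are placeholders; the config name must match the checkpoint architecture):
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /tmp/t5x_checkpoints/t5_small \
#       --config_name t5-small \
#       --flax_dump_folder_path /tmp/flax_dump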
'''simple docstring''' import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class __magic_name__ ( _UpperCamelCase , unittest.TestCase ): lowerCAmelCase : Optional[int] = BarthezTokenizer lowerCAmelCase : int = BarthezTokenizerFast lowerCAmelCase : Dict = True lowerCAmelCase : str = True def __lowercase ( self : List[Any] ): super().setUp() _a : List[Any] = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ,legacy_format=_UpperCAmelCase ) _a : Union[str, Any] = tokenizer def __lowercase ( self : Tuple ): _a : Optional[Any] = '<pad>' _a : List[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) ,_UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) ,_UpperCAmelCase ) def __lowercase ( self : str ): _a : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,'<s>' ) self.assertEqual(vocab_keys[1] ,'<pad>' ) self.assertEqual(vocab_keys[-1] ,'<mask>' ) self.assertEqual(len(_UpperCAmelCase ) ,101122 ) def __lowercase ( self : Dict ): self.assertEqual(self.get_tokenizer().vocab_size ,101122 ) @require_torch def __lowercase ( self : Dict ): _a : Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _a : Dict = [0, 57, 3018, 70307, 91, 2] _a : Dict = self.tokenizer( _UpperCAmelCase ,max_length=len(_UpperCAmelCase ) ,padding=_UpperCAmelCase ,truncation=_UpperCAmelCase ,return_tensors='pt' ) self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase ) self.assertEqual((2, 6) ,batch.input_ids.shape ) self.assertEqual((2, 6) ,batch.attention_mask.shape ) _a : Tuple = batch.input_ids.tolist()[0] self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase ) def __lowercase ( self : Optional[Any] ): if not self.test_rust_tokenizer: return _a : str = self.get_tokenizer() _a : List[str] = self.get_rust_tokenizer() _a : Dict = 'I was born in 92000, and this is falsé.' 
_a : List[Any] = tokenizer.tokenize(_UpperCAmelCase ) _a : Tuple = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase ) _a : Optional[Any] = tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) _a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase ) _a : Union[str, Any] = self.get_rust_tokenizer() _a : Any = tokenizer.encode(_UpperCAmelCase ) _a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase ) @slow def __lowercase ( self : Optional[int] ): # fmt: off _a : Optional[int] = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. _a : Optional[Any] = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase ,model_name='moussaKam/mbarthez' ,revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' ,sequences=_UpperCAmelCase ,)
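# A self-contained sketch of the slow/fast equivalence checked above; the
# checkpoint name and sample sentence come from the test itself:
from transformers import BarthezTokenizer, BarthezTokenizerFast

slow_tokenizer = BarthezTokenizer.from_pretrained("moussaKam/mbarthez")
fast_tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
sample = "I was born in 92000, and this is falsé."
assert slow_tokenizer.tokenize(sample) == fast_tokenizer.tokenize(sample)
assert slow_tokenizer.encode(sample) == fast_tokenizer.encode(sample)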
"""simple docstring""" import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from tqdm import auto as tqdm_lib SCREAMING_SNAKE_CASE__ = { "debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR, "critical": logging.CRITICAL, } SCREAMING_SNAKE_CASE__ = logging.WARNING def lowerCAmelCase__ ( ) -> List[Any]: """simple docstring""" snake_case = os.getenv('DATASETS_VERBOSITY' , _UpperCamelCase ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"""Unknown option DATASETS_VERBOSITY={env_level_str}, """ f"""has to be one of: { ", ".join(log_levels.keys() ) }""" ) return _default_log_level def lowerCAmelCase__ ( ) -> str: """simple docstring""" return __name__.split('.' )[0] def lowerCAmelCase__ ( ) -> logging.Logger: """simple docstring""" return logging.getLogger(_get_library_name() ) def lowerCAmelCase__ ( ) -> None: """simple docstring""" snake_case = _get_library_root_logger() library_root_logger.setLevel(_get_default_logging_level() ) def lowerCAmelCase__ ( ) -> None: """simple docstring""" snake_case = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET ) def lowerCAmelCase__ ( _UpperCamelCase : Optional[str] = None ) -> logging.Logger: """simple docstring""" if name is None: snake_case = _get_library_name() return logging.getLogger(_UpperCamelCase ) def lowerCAmelCase__ ( ) -> int: """simple docstring""" return _get_library_root_logger().getEffectiveLevel() def lowerCAmelCase__ ( _UpperCamelCase : int ) -> None: """simple docstring""" _get_library_root_logger().setLevel(_UpperCamelCase ) def lowerCAmelCase__ ( ) -> Any: """simple docstring""" return set_verbosity(_UpperCamelCase ) def lowerCAmelCase__ ( ) -> Optional[Any]: """simple docstring""" return set_verbosity(_UpperCamelCase ) def lowerCAmelCase__ ( ) -> List[str]: """simple docstring""" return set_verbosity(_UpperCamelCase ) def lowerCAmelCase__ ( ) -> Any: """simple docstring""" return set_verbosity(_UpperCamelCase ) def lowerCAmelCase__ ( ) -> None: """simple docstring""" snake_case = False def lowerCAmelCase__ ( ) -> None: """simple docstring""" snake_case = True # Configure the library root logger at the module level (singleton-like) _configure_library_root_logger() class lowerCAmelCase_ : def __init__( self , *lowerCAmelCase , **lowerCAmelCase ): # pylint: disable=unused-argument """simple docstring""" snake_case = args[0] if args else None def __iter__( self ): """simple docstring""" return iter(self._iterator ) def __getattr__( self , lowerCAmelCase ): """simple docstring""" def empty_fn(*lowerCAmelCase , **lowerCAmelCase ): # pylint: disable=unused-argument return return empty_fn def __enter__( self ): """simple docstring""" return self def __exit__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" return SCREAMING_SNAKE_CASE__ = True class lowerCAmelCase_ : def __call__( self , *lowerCAmelCase , lowerCAmelCase=False , **lowerCAmelCase ): """simple docstring""" if _tqdm_active and not disable: return tqdm_lib.tqdm(*lowerCAmelCase , **lowerCAmelCase ) else: return EmptyTqdm(*lowerCAmelCase , **lowerCAmelCase ) def snake_case ( self , *lowerCAmelCase , **lowerCAmelCase ): """simple docstring""" snake_case = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*lowerCAmelCase , **lowerCAmelCase ) def snake_case ( self ): """simple 
docstring""" if _tqdm_active: return tqdm_lib.tqdm.get_lock() SCREAMING_SNAKE_CASE__ = _tqdm_cls() def lowerCAmelCase__ ( ) -> bool: """simple docstring""" global _tqdm_active return bool(_tqdm_active ) def lowerCAmelCase__ ( ) -> Optional[Any]: """simple docstring""" global _tqdm_active snake_case = True def lowerCAmelCase__ ( ) -> Tuple: """simple docstring""" global _tqdm_active snake_case = False
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { "microsoft/swinv2-tiny-patch4-window8-256": ( "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json" ), } class lowerCAmelCase_ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase : Tuple = """swinv2""" _lowerCAmelCase : Any = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self , lowerCAmelCase=2_24 , lowerCAmelCase=4 , lowerCAmelCase=3 , lowerCAmelCase=96 , lowerCAmelCase=[2, 2, 6, 2] , lowerCAmelCase=[3, 6, 12, 24] , lowerCAmelCase=7 , lowerCAmelCase=4.0 , lowerCAmelCase=True , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.1 , lowerCAmelCase="gelu" , lowerCAmelCase=False , lowerCAmelCase=0.02 , lowerCAmelCase=1E-5 , lowerCAmelCase=32 , **lowerCAmelCase , ): """simple docstring""" super().__init__(**lowerCAmelCase ) snake_case = image_size snake_case = patch_size snake_case = num_channels snake_case = embed_dim snake_case = depths snake_case = len(lowerCAmelCase ) snake_case = num_heads snake_case = window_size snake_case = mlp_ratio snake_case = qkv_bias snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = drop_path_rate snake_case = hidden_act snake_case = use_absolute_embeddings snake_case = layer_norm_eps snake_case = initializer_range snake_case = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model snake_case = int(embed_dim * 2 ** (len(lowerCAmelCase ) - 1) ) snake_case = (0, 0, 0, 0)
import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''): raise Exception('''requires fairseq >= 1.0.0a''') logging.set_verbosity_info() __a = logging.get_logger(__name__) __a = '''Hello world! cécé herlolip''' def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Optional[Any]: """simple docstring""" lowercase : Optional[Any] = FairseqRobertaModel.from_pretrained(A_ ) roberta.eval() # disable dropout lowercase : str = roberta.model.encoder.sentence_encoder lowercase : Tuple = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings, hidden_size=roberta.cfg.model.encoder_embed_dim, num_hidden_layers=roberta.cfg.model.encoder_layers, num_attention_heads=roberta.cfg.model.encoder_attention_heads, intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-5, ) if classification_head: lowercase : Optional[Any] = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0] print('''Our RoBERTa config:''', A_ ) lowercase : Optional[int] = XLMRobertaXLForSequenceClassification(A_ ) if classification_head else XLMRobertaXLForMaskedLM(A_ ) model.eval() # Now let's copy all the weights. # Embeddings lowercase : Union[str, Any] = roberta_sent_encoder.embed_tokens.weight lowercase : Dict = roberta_sent_encoder.embed_positions.weight lowercase : int = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
lowercase : Union[str, Any] = roberta_sent_encoder.layer_norm.weight lowercase : List[Any] = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer lowercase : BertLayer = model.roberta.encoder.layer[i] lowercase : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i] lowercase : RobertaAttention = layer.attention lowercase : Tuple = roberta_layer.self_attn_layer_norm.weight lowercase : int = roberta_layer.self_attn_layer_norm.bias # self attention lowercase : BertSelfAttention = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) lowercase : List[str] = roberta_layer.self_attn.q_proj.weight lowercase : int = roberta_layer.self_attn.q_proj.bias lowercase : Union[str, Any] = roberta_layer.self_attn.k_proj.weight lowercase : Tuple = roberta_layer.self_attn.k_proj.bias lowercase : Optional[Any] = roberta_layer.self_attn.v_proj.weight lowercase : Any = roberta_layer.self_attn.v_proj.bias # self-attention output lowercase : BertSelfOutput = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape lowercase : List[Any] = roberta_layer.self_attn.out_proj.weight lowercase : Dict = roberta_layer.self_attn.out_proj.bias # this one is final layer norm lowercase : Any = roberta_layer.final_layer_norm.weight lowercase : str = roberta_layer.final_layer_norm.bias # intermediate lowercase : BertIntermediate = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape lowercase : Dict = roberta_layer.fca.weight lowercase : List[Any] = roberta_layer.fca.bias # output lowercase : BertOutput = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape lowercase : Any = roberta_layer.fca.weight lowercase : List[Any] = roberta_layer.fca.bias # end of layer if classification_head: lowercase : Dict = roberta.model.classification_heads['''mnli'''].dense.weight lowercase : Union[str, Any] = roberta.model.classification_heads['''mnli'''].dense.bias lowercase : Any = roberta.model.classification_heads['''mnli'''].out_proj.weight lowercase : Optional[Any] = roberta.model.classification_heads['''mnli'''].out_proj.bias else: # LM Head lowercase : Optional[Any] = roberta.model.encoder.lm_head.dense.weight lowercase : Union[str, Any] = roberta.model.encoder.lm_head.dense.bias lowercase : List[str] = roberta.model.encoder.lm_head.layer_norm.weight lowercase : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.bias lowercase : int = roberta.model.encoder.lm_head.weight lowercase : Optional[Any] = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
lowercase : torch.Tensor = roberta.encode(A_ ).unsqueeze(0 ) # batch of size 1 lowercase : str = model(A_ )[0] if classification_head: lowercase : Optional[int] = roberta.model.classification_heads['''mnli'''](roberta.extract_features(A_ ) ) else: lowercase : Dict = roberta.model(A_ )[0] print(our_output.shape, their_output.shape ) lowercase : Any = torch.max(torch.abs(our_output - their_output ) ).item() print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7 lowercase : int = torch.allclose(A_, A_, atol=1e-3 ) print('''Do both models output the same tensors?''', '''🔥''' if success else '''💩''' ) if not success: raise Exception('''Something went wRoNg''' ) pathlib.Path(A_ ).mkdir(parents=A_, exist_ok=A_ ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(A_ ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.''' ) __a = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
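# A hypothetical invocation (the script filename and paths are placeholders;
# the flag names are the ones defined by the argparse block above):
#
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq/xlmr.xl \
#       --pytorch_dump_folder_path /path/to/output \
#       --classification_head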
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __init__( self : List[str] ,lowercase_ : Dict ,lowercase_ : Dict=7 ,lowercase_ : Optional[int]=3 ,lowercase_ : int=3_0 ,lowercase_ : Optional[Any]=4_0_0 ,lowercase_ : Any=True ,lowercase_ : List[str]=None ,lowercase_ : str=True ,lowercase_ : List[Any]=[0.5, 0.5, 0.5] ,lowercase_ : List[str]=[0.5, 0.5, 0.5] ,lowercase_ : Any=True ,lowercase_ : Union[str, Any]=1 / 2_5_5 ,lowercase_ : str=True ,): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p lowerCAmelCase__ : str = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} lowerCAmelCase__ : Any = parent lowerCAmelCase__ : Tuple = batch_size lowerCAmelCase__ : List[str] = num_channels lowerCAmelCase__ : Optional[Any] = min_resolution lowerCAmelCase__ : Union[str, Any] = max_resolution lowerCAmelCase__ : Optional[int] = do_resize lowerCAmelCase__ : str = size lowerCAmelCase__ : Union[str, Any] = do_normalize lowerCAmelCase__ : List[str] = image_mean lowerCAmelCase__ : str = image_std lowerCAmelCase__ : Optional[Any] = do_rescale lowerCAmelCase__ : Union[str, Any] = rescale_factor lowerCAmelCase__ : Optional[Any] = do_pad def __lowerCAmelCase ( self : Optional[Any] ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __lowerCAmelCase ( self : List[str] ,lowercase_ : List[Any] ,lowercase_ : int=False ): if not batched: lowerCAmelCase__ : Tuple = image_inputs[0] if isinstance(lowercase_ ,Image.Image ): lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = image.size else: lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = image.shape[1], image.shape[2] if w < h: lowerCAmelCase__ : Any = int(self.size['''shortest_edge'''] * h / w ) lowerCAmelCase__ : str = self.size['''shortest_edge'''] elif w > h: lowerCAmelCase__ : Union[str, Any] = self.size['''shortest_edge'''] lowerCAmelCase__ : Dict = int(self.size['''shortest_edge'''] * w / h ) else: lowerCAmelCase__ : List[str] = self.size['''shortest_edge'''] lowerCAmelCase__ : str = self.size['''shortest_edge'''] else: lowerCAmelCase__ : Optional[Any] = [] for image in image_inputs: lowerCAmelCase__ ,lowerCAmelCase__ : Any = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowerCAmelCase__ : List[str] = max(lowercase_ ,key=lambda lowercase_ : item[0] )[0] lowerCAmelCase__ : Any = max(lowercase_ ,key=lambda lowercase_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE ( a_ , unittest.TestCase ): """simple docstring""" lowercase__ = DetaImageProcessor if is_vision_available() else None def __lowerCAmelCase ( self : Any ): lowerCAmelCase__ : Optional[Any] = DetaImageProcessingTester(self ) @property def __lowerCAmelCase ( self : Any ): return self.image_processor_tester.prepare_image_processor_dict() def 
__lowerCAmelCase ( self : List[str] ): lowerCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase_ ,'''image_mean''' ) ) self.assertTrue(hasattr(lowercase_ ,'''image_std''' ) ) self.assertTrue(hasattr(lowercase_ ,'''do_normalize''' ) ) self.assertTrue(hasattr(lowercase_ ,'''do_resize''' ) ) self.assertTrue(hasattr(lowercase_ ,'''do_rescale''' ) ) self.assertTrue(hasattr(lowercase_ ,'''do_pad''' ) ) self.assertTrue(hasattr(lowercase_ ,'''size''' ) ) def __lowerCAmelCase ( self : Dict ): lowerCAmelCase__ : Dict = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} ) self.assertEqual(image_processor.do_pad ,lowercase_ ) def __lowerCAmelCase ( self : List[str] ): pass def __lowerCAmelCase ( self : Union[str, Any] ): # Initialize image_processing lowerCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_ ,Image.Image ) # Test not batched input lowerCAmelCase__ : List[Any] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values lowerCAmelCase__ ,lowerCAmelCase__ : Tuple = self.image_processor_tester.get_expected_values(lowercase_ ) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = self.image_processor_tester.get_expected_values(lowercase_ ,batched=lowercase_ ) lowerCAmelCase__ : Optional[int] = image_processing(lowercase_ ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def __lowerCAmelCase ( self : Dict ): # Initialize image_processing lowerCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase__ : Tuple = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase_ ,numpify=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_ ,np.ndarray ) # Test not batched input lowerCAmelCase__ : List[str] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values lowerCAmelCase__ ,lowerCAmelCase__ : Any = self.image_processor_tester.get_expected_values(lowercase_ ) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched lowerCAmelCase__ : str = image_processing(lowercase_ ,return_tensors='''pt''' ).pixel_values lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = self.image_processor_tester.get_expected_values(lowercase_ ,batched=lowercase_ ) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def __lowerCAmelCase ( self : Union[str, Any] ): # Initialize image_processing lowerCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase__ : Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase_ ,torchify=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_ ,torch.Tensor ) # Test not 
batched input lowerCAmelCase__ : List[Any] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values lowerCAmelCase__ ,lowerCAmelCase__ : Any = self.image_processor_tester.get_expected_values(lowercase_ ) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched lowerCAmelCase__ : str = image_processing(lowercase_ ,return_tensors='''pt''' ).pixel_values lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = self.image_processor_tester.get_expected_values(lowercase_ ,batched=lowercase_ ) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) @slow def __lowerCAmelCase ( self : Tuple ): # prepare image and target lowerCAmelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' ,'''r''' ) as f: lowerCAmelCase__ : Union[str, Any] = json.loads(f.read() ) lowerCAmelCase__ : str = {'''image_id''': 3_9_7_6_9, '''annotations''': target} # encode them lowerCAmelCase__ : Optional[Any] = DetaImageProcessor() lowerCAmelCase__ : Optional[int] = image_processing(images=lowercase_ ,annotations=lowercase_ ,return_tensors='''pt''' ) # verify pixel values lowerCAmelCase__ : Dict = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding['''pixel_values'''].shape ,lowercase_ ) lowerCAmelCase__ : Any = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] ,lowercase_ ,atol=1E-4 ) ) # verify area lowerCAmelCase__ : Tuple = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] ,lowercase_ ) ) # verify boxes lowerCAmelCase__ : List[str] = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape ,lowercase_ ) lowerCAmelCase__ : Tuple = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] ,lowercase_ ,atol=1E-3 ) ) # verify image_id lowerCAmelCase__ : Optional[int] = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] ,lowercase_ ) ) # verify is_crowd lowerCAmelCase__ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] ,lowercase_ ) ) # verify class_labels lowerCAmelCase__ : Any = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] ,lowercase_ ) ) # verify orig_size lowerCAmelCase__ : int = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] ,lowercase_ ) ) # verify size lowerCAmelCase__ : str = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] ,lowercase_ ) ) @slow def __lowerCAmelCase ( self : Any ): # prepare image, target and masks_path lowerCAmelCase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' ,'''r''' ) as f: lowerCAmelCase__ : str = json.loads(f.read() ) lowerCAmelCase__ : Tuple = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target} lowerCAmelCase__ : Optional[Any] = 
pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them lowerCAmelCase__ : str = DetaImageProcessor(format='''coco_panoptic''' ) lowerCAmelCase__ : Optional[int] = image_processing(images=lowercase_ ,annotations=lowercase_ ,masks_path=lowercase_ ,return_tensors='''pt''' ) # verify pixel values lowerCAmelCase__ : Any = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding['''pixel_values'''].shape ,lowercase_ ) lowerCAmelCase__ : int = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] ,lowercase_ ,atol=1E-4 ) ) # verify area lowerCAmelCase__ : Tuple = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] ,lowercase_ ) ) # verify boxes lowerCAmelCase__ : Optional[Any] = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape ,lowercase_ ) lowerCAmelCase__ : str = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] ,lowercase_ ,atol=1E-3 ) ) # verify image_id lowerCAmelCase__ : Union[str, Any] = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] ,lowercase_ ) ) # verify is_crowd lowerCAmelCase__ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] ,lowercase_ ) ) # verify class_labels lowerCAmelCase__ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] ,lowercase_ ) ) # verify masks lowerCAmelCase__ : Optional[int] = 8_2_2_8_7_3 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() ,lowercase_ ) # verify orig_size lowerCAmelCase__ : List[Any] = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] ,lowercase_ ) ) # verify size lowerCAmelCase__ : Optional[Any] = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] ,lowercase_ ) )
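# A standalone sketch of the detection preprocessing exercised above; the
# fixture path and the 800 x 1066 output shape are taken from the test itself:
from PIL import Image
from transformers import DetaImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
encoding = DetaImageProcessor()(images=image, return_tensors="pt")
print(encoding["pixel_values"].shape)  # torch.Size([1, 3, 800, 1066])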
import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class UpperCamelCase__ ( __lowercase , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ =CTRLTokenizer UpperCAmelCase_ =False UpperCAmelCase_ =False def _UpperCamelCase ( self ) -> List[str]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE_ = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>'''] SCREAMING_SNAKE_CASE_ = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) ) SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', ''''''] SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''} SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(snake_case_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(snake_case_ ) ) def _UpperCamelCase ( self , **_A ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname , **snake_case_ ) def _UpperCamelCase ( self , _A ) -> List[Any]: SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt''' SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt''' return input_text, output_text def _UpperCamelCase ( self ) -> str: SCREAMING_SNAKE_CASE_ = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt''' SCREAMING_SNAKE_CASE_ = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split() SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) SCREAMING_SNAKE_CASE_ = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE_ = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )
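# The same tokenizer against the released checkpoint rather than the toy vocab,
# as a hedged sketch ("ctrl" is the public Hub id; its merges differ from the
# toy ones above, so the asserted ids do not apply here):
from transformers import CTRLTokenizer

tokenizer = CTRLTokenizer.from_pretrained("ctrl")
print(tokenizer.tokenize("adapt react readapt apt"))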
def solution(n: int = 1000) -> int:
    """Return the sum of 2 * a * ((a - 1) // 2) for a in [3, n]."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
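# Hand-checked example: for n = 5 the terms are 2*3*1 = 6, 2*4*1 = 8 and
# 2*5*2 = 20, so the sum is 34.
assert solution(5) == 34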
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # model_outputs[0] is the first available tensor: logits or last_hidden_state
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
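# A usage sketch through the pipeline factory; the checkpoint is an arbitrary
# small model chosen only for illustration:
from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased", framework="pt")
features = extractor("This is a test.")       # nested Python lists by default
print(len(features[0]), len(features[0][0]))  # num_tokens, hidden_size (768)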
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase : List[str] = logging.get_logger(__name__) class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["audio_values", "audio_mask"] def __init__( self , _a=2_048 , _a=1 , _a=[16, 16] , _a=128 , _a=44_100 , _a=86 , _a=2_048 , _a=0.0 , **_a , ): """simple docstring""" super().__init__( feature_size=_a , sampling_rate=_a , padding_value=_a , **_a , ) lowerCamelCase = spectrogram_length lowerCamelCase = num_channels lowerCamelCase = patch_size lowerCamelCase = feature_size // self.patch_size[1] lowerCamelCase = n_fft lowerCamelCase = sampling_rate // hop_length_to_sampling_rate lowerCamelCase = sampling_rate lowerCamelCase = padding_value lowerCamelCase = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_a , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=_a , norm="""slaney""" , mel_scale="""slaney""" , ).T def _lowerCAmelCase ( self , _a ): """simple docstring""" lowerCamelCase = spectrogram( _a , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , ) lowerCamelCase = log_spec[:, :-1] lowerCamelCase = log_spec - 20.0 lowerCamelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , _a , _a = None , _a = True , _a = None , _a = False , _a = False , **_a , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( """This feature extractor is set to support sampling rate""" f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled' f' with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) lowerCamelCase = isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) lowerCamelCase = is_batched_numpy or ( isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCamelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_a , np.ndarray ): lowerCamelCase = np.asarray(_a , dtype=np.floataa ) elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCamelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCamelCase = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis lowerCamelCase = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , _a ): lowerCamelCase = [np.asarray(_a , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask lowerCamelCase = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: lowerCamelCase = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] lowerCamelCase = np.array(_a ).astype(np.floataa ) # convert into correct format for padding lowerCamelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch lowerCamelCase = np.ones([len(_a ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) lowerCamelCase = padded_audio_features * self.padding_value for i in range(len(_a ) ): lowerCamelCase = audio_features[i] lowerCamelCase = feature # return as BatchFeature if return_attention_mask: lowerCamelCase = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask} else: lowerCamelCase = {"""audio_values""": padded_audio_features} lowerCamelCase = BatchFeature(data=_a , tensor_type=_a ) return encoded_inputs
def molarity_to_normality(nfactor: float, moles: float, volume: float) -> float:
    """Convert molarity to normality: N = M * n-factor, with M = moles / volume."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = n*R*T / V, with R = 0.0821."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = n*R*T / P, with R = 0.0821."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = P*V / (n*R), with R = 0.0821."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
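# Hand-checked examples (R = 0.0821 L·atm/(mol·K) is hard-coded in the formulas):
print(molarity_to_normality(2, 3.1, 0.31))             # 20: (3.1 / 0.31) * 2
print(moles_to_pressure(0.82, 3, 300))                 # 90: n*R*T / V
print(moles_to_volume(0.82, 3, 300))                   # 90: n*R*T / P
print(pressure_and_volume_to_temperature(0.82, 1, 2))  # 20: P*V / (n*R)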
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: lowerCamelCase_ : Optional[int] = None lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__) lowerCamelCase_ : str = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""} lowerCamelCase_ : int = { """vocab_file""": { """google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""", }, """tokenizer_file""": { """google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""", }, } lowerCamelCase_ : Optional[Any] = { """google/rembert""": 2_5_6, } lowerCamelCase_ : Optional[Any] = """▁""" class __A ( _SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase = RemBertTokenizer def __init__( self , __A=None , __A=None , __A=True , __A=True , __A=False , __A="[CLS]" , __A="[SEP]" , __A="<unk>" , __A="[SEP]" , __A="<pad>" , __A="[CLS]" , __A="[MASK]" , **__A , ) -> Dict: # Mask token behave like a normal word, i.e. include the space before it a =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token super().__init__( __A , tokenizer_file=__A , do_lower_case=__A , remove_space=__A , keep_accents=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , **__A , ) a =do_lower_case a =remove_space a =keep_accents a =vocab_file a =False if not self.vocab_file else True def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> List[int]: a =[self.sep_token_id] a =[self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE ( self , __A , __A = None , __A = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__A )) + [1] + ([0] * len(__A )) + [1] return [1] + ([0] * len(__A )) + [1] def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> List[int]: a =[self.sep_token_id] a =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> Tuple[str]: if not os.path.isdir(__A ): logger.error('''Vocabulary path ({}) should be a directory'''.format(__A ) ) return a =os.path.join( __A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ): copyfile(self.vocab_file , __A ) return (out_vocab_file,)
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(_a ) class _lowerCamelCase( _a ): def __init__( self, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]: """simple docstring""" super().__init__(*lowerCamelCase, **lowerCamelCase) requires_backends(self, 'vision') self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING) def UpperCamelCase ( self, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None) -> List[Any]: """simple docstring""" _lowercase : Tuple = {} _lowercase : List[Any] = {} if prompt is not None: _lowercase : List[str] = prompt if generate_kwargs is not None: _lowercase : Optional[Any] = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: _lowercase : str = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,' ' please use only one') _lowercase : Union[str, Any] = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self, lowerCamelCase, **lowerCamelCase) -> Any: """simple docstring""" return super().__call__(lowerCamelCase, **lowerCamelCase) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=None) -> List[Any]: """simple docstring""" _lowercase : Any = load_image(lowerCamelCase) if prompt is not None: if not isinstance(lowerCamelCase, lowerCamelCase): raise ValueError( F'''Received an invalid text input, got - {type(lowerCamelCase)} - but expected a single string. 
''' 'Note also that one single text can be provided for conditional image to text generation.') _lowercase : int = self.model.config.model_type if model_type == "git": _lowercase : Optional[int] = self.image_processor(images=lowerCamelCase, return_tensors=self.framework) _lowercase : str = self.tokenizer(text=lowerCamelCase, add_special_tokens=lowerCamelCase).input_ids _lowercase : Optional[int] = [self.tokenizer.cls_token_id] + input_ids _lowercase : int = torch.tensor(lowerCamelCase).unsqueeze(0) model_inputs.update({'input_ids': input_ids}) elif model_type == "pix2struct": _lowercase : List[Any] = self.image_processor(images=lowerCamelCase, header_text=lowerCamelCase, return_tensors=self.framework) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation _lowercase : Optional[int] = self.image_processor(images=lowerCamelCase, return_tensors=self.framework) _lowercase : Optional[int] = self.tokenizer(lowerCamelCase, return_tensors=self.framework) model_inputs.update(lowerCamelCase) else: raise ValueError(F'''Model type {model_type} does not support conditional text generation''') else: _lowercase : int = self.image_processor(images=lowerCamelCase, return_tensors=self.framework) if self.model.config.model_type == "git" and prompt is None: _lowercase : Optional[Any] = None return model_inputs def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=None) -> Any: """simple docstring""" if ( "input_ids" in model_inputs and isinstance(model_inputs['input_ids'], lowerCamelCase) and all(x is None for x in model_inputs['input_ids']) ): _lowercase : List[Any] = None if generate_kwargs is None: _lowercase : int = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. _lowercase : Optional[int] = model_inputs.pop(self.model.main_input_name) _lowercase : Optional[Any] = self.model.generate(lowerCamelCase, **lowerCamelCase, **lowerCamelCase) return model_outputs def UpperCamelCase ( self, lowerCamelCase) -> Dict: """simple docstring""" _lowercase : Any = [] for output_ids in model_outputs: _lowercase : List[str] = { 'generated_text': self.tokenizer.decode( lowerCamelCase, skip_special_tokens=lowerCamelCase, ) } records.append(lowerCamelCase) return records
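# A usage sketch via the factory; the checkpoint and image URL are illustrative
# and the exact caption depends on the model:
from transformers import pipeline

captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
print(captioner("http://images.cocodataset.org/val2017/000000039769.jpg"))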
import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class a ( unittest.TestCase ): __lowerCAmelCase : Any = MODEL_FOR_MASKED_LM_MAPPING __lowerCAmelCase : Optional[Any] = TF_MODEL_FOR_MASKED_LM_MAPPING def __lowerCamelCase ( self :str ): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def __lowerCamelCase ( self :Any ): snake_case__ : Optional[Any] = pipeline(task='''fill-mask''' ,model='''sshleifer/tiny-distilroberta-base''' ,top_k=2 ,framework='''tf''' ) snake_case__ : int = unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(__lowercase ,decimals=6 ) ,[ {'''sequence''': '''My name is grouped''', '''score''': 2.1e-0_5, '''token''': 3_8_0_1_5, '''token_str''': ''' grouped'''}, {'''sequence''': '''My name is accuser''', '''score''': 2.1e-0_5, '''token''': 2_5_5_0_6, '''token_str''': ''' accuser'''}, ] ,) snake_case__ : int = unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(__lowercase ,decimals=6 ) ,[ { '''sequence''': '''The largest city in France is grouped''', '''score''': 2.1e-0_5, '''token''': 3_8_0_1_5, '''token_str''': ''' grouped''', }, { '''sequence''': '''The largest city in France is accuser''', '''score''': 2.1e-0_5, '''token''': 2_5_5_0_6, '''token_str''': ''' accuser''', }, ] ,) snake_case__ : Optional[int] = unmasker('''My name is <mask>''' ,targets=[''' Patrick''', ''' Clara''', ''' Teven'''] ,top_k=3 ) self.assertEqual( nested_simplify(__lowercase ,decimals=6 ) ,[ {'''sequence''': '''My name is Clara''', '''score''': 2e-0_5, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''}, {'''sequence''': '''My name is Patrick''', '''score''': 2e-0_5, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''}, {'''sequence''': '''My name is Te''', '''score''': 1.9e-0_5, '''token''': 2_9_4_1, '''token_str''': ''' Te'''}, ] ,) @require_torch def __lowerCamelCase ( self :Optional[int] ): snake_case__ : str = pipeline(task='''fill-mask''' ,model='''sshleifer/tiny-distilroberta-base''' ,top_k=2 ,framework='''pt''' ) snake_case__ : str = unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(__lowercase ,decimals=6 ) ,[ {'''sequence''': '''My name is Maul''', '''score''': 2.2e-0_5, '''token''': 3_5_6_7_6, '''token_str''': ''' Maul'''}, {'''sequence''': '''My name isELS''', '''score''': 2.2e-0_5, '''token''': 1_6_4_1_6, '''token_str''': '''ELS'''}, ] ,) snake_case__ : List[str] = unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(__lowercase ,decimals=6 ) ,[ { '''sequence''': '''The largest city in France is Maul''', '''score''': 2.2e-0_5, '''token''': 3_5_6_7_6, '''token_str''': ''' Maul''', }, {'''sequence''': '''The largest city in France isELS''', '''score''': 2.2e-0_5, '''token''': 1_6_4_1_6, '''token_str''': '''ELS'''}, ] ,) snake_case__ : Union[str, Any] = unmasker('''My name is <mask>''' ,targets=[''' Patrick''', ''' Clara''', ''' Teven'''] ,top_k=3 ) self.assertEqual( nested_simplify(__lowercase ,decimals=6 ) ,[ {'''sequence''': '''My name is Patrick''', '''score''': 2.1e-0_5, '''token''': 3_4_9_9, '''token_str''': ''' 
Patrick'''}, {'''sequence''': '''My name is Te''', '''score''': 2e-0_5, '''token''': 2_9_4_1, '''token_str''': ''' Te'''}, {'''sequence''': '''My name is Clara''', '''score''': 2e-0_5, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''}, ] ,) snake_case__ : Optional[int] = unmasker('''My name is <mask> <mask>''' ,top_k=2 ) self.assertEqual( nested_simplify(__lowercase ,decimals=6 ) ,[ [ { '''score''': 2.2e-0_5, '''token''': 3_5_6_7_6, '''token_str''': ''' Maul''', '''sequence''': '''<s>My name is Maul<mask></s>''', }, {'''score''': 2.2e-0_5, '''token''': 1_6_4_1_6, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''}, ], [ { '''score''': 2.2e-0_5, '''token''': 3_5_6_7_6, '''token_str''': ''' Maul''', '''sequence''': '''<s>My name is<mask> Maul</s>''', }, {'''score''': 2.2e-0_5, '''token''': 1_6_4_1_6, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''}, ], ] ,) @require_torch_gpu def __lowerCamelCase ( self :int ): snake_case__ : Optional[int] = pipeline('''fill-mask''' ,model='''hf-internal-testing/tiny-random-distilbert''' ,device=0 ,framework='''pt''' ) # convert model to fp16 pipe.model.half() snake_case__ : List[str] = pipe('''Paris is the [MASK] of France.''' ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(__lowercase ,__lowercase ) @slow @require_torch def __lowerCamelCase ( self :str ): snake_case__ : List[str] = pipeline(task='''fill-mask''' ,model='''distilroberta-base''' ,top_k=2 ,framework='''pt''' ) self.run_large_test(__lowercase ) @slow @require_tf def __lowerCamelCase ( self :Any ): snake_case__ : Optional[Any] = pipeline(task='''fill-mask''' ,model='''distilroberta-base''' ,top_k=2 ,framework='''tf''' ) self.run_large_test(__lowercase ) def __lowerCamelCase ( self :Optional[Any] ,__lowercase :List[Any] ): snake_case__ : Optional[Any] = unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(__lowercase ) ,[ {'''sequence''': '''My name is John''', '''score''': 0.008, '''token''': 6_1_0, '''token_str''': ''' John'''}, {'''sequence''': '''My name is Chris''', '''score''': 0.007, '''token''': 1_5_7_3, '''token_str''': ''' Chris'''}, ] ,) snake_case__ : str = unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(__lowercase ) ,[ { '''sequence''': '''The largest city in France is Paris''', '''score''': 0.251, '''token''': 2_2_0_1, '''token_str''': ''' Paris''', }, { '''sequence''': '''The largest city in France is Lyon''', '''score''': 0.214, '''token''': 1_2_7_9_0, '''token_str''': ''' Lyon''', }, ] ,) snake_case__ : Dict = unmasker('''My name is <mask>''' ,targets=[''' Patrick''', ''' Clara''', ''' Teven'''] ,top_k=3 ) self.assertEqual( nested_simplify(__lowercase ) ,[ {'''sequence''': '''My name is Patrick''', '''score''': 0.005, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''}, {'''sequence''': '''My name is Clara''', '''score''': 0.000, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''}, {'''sequence''': '''My name is Te''', '''score''': 0.000, '''token''': 2_9_4_1, '''token_str''': ''' Te'''}, ] ,) @require_torch def __lowerCamelCase ( self :List[str] ): snake_case__ : List[Any] = pipeline(task='''fill-mask''' ,model='''sshleifer/tiny-distilroberta-base''' ,framework='''pt''' ) snake_case__ : str = None snake_case__ : int = None self.run_pipeline_test(__lowercase ,[] ) @require_tf def __lowerCamelCase ( self :int ): snake_case__ : 
Optional[int] = pipeline(task='''fill-mask''' ,model='''sshleifer/tiny-distilroberta-base''' ,framework='''tf''' ) snake_case__ : int = None snake_case__ : List[str] = None self.run_pipeline_test(__lowercase ,[] ) def __lowerCamelCase ( self :Any ,__lowercase :Any ,__lowercase :str ,__lowercase :Union[str, Any] ): if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' ) snake_case__ : Optional[int] = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase ) snake_case__ : List[str] = [ F"""This is another {tokenizer.mask_token} test""", ] return fill_masker, examples def __lowerCamelCase ( self :Optional[Any] ,__lowercase :List[Any] ,__lowercase :Optional[Any] ): snake_case__ : List[str] = fill_masker.tokenizer snake_case__ : List[Any] = fill_masker.model snake_case__ : Dict = fill_masker( F"""This is a {tokenizer.mask_token}""" ,) self.assertEqual( __lowercase ,[ {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, ] ,) snake_case__ : Tuple = fill_masker([F"""This is a {tokenizer.mask_token}"""] ) self.assertEqual( __lowercase ,[ {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, ] ,) snake_case__ : List[str] = fill_masker([F"""This is a {tokenizer.mask_token}""", F"""Another {tokenizer.mask_token} great test."""] ) self.assertEqual( __lowercase ,[ [ {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, ], [ {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': 
ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, ], ] ,) with self.assertRaises(__lowercase ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(__lowercase ): fill_masker('''This is''' ) self.run_test_top_k(__lowercase ,__lowercase ) self.run_test_targets(__lowercase ,__lowercase ) self.run_test_top_k_targets(__lowercase ,__lowercase ) self.fill_mask_with_duplicate_targets_and_top_k(__lowercase ,__lowercase ) self.fill_mask_with_multiple_masks(__lowercase ,__lowercase ) def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :Optional[int] ,__lowercase :int ): snake_case__ : int = tokenizer.get_vocab() snake_case__ : Dict = sorted(vocab.keys() )[:2] # Pipeline argument snake_case__ : List[Any] = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase ,targets=__lowercase ) snake_case__ : str = fill_masker(F"""This is a {tokenizer.mask_token}""" ) self.assertEqual( __lowercase ,[ {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, ] ,) snake_case__ : Optional[Any] = {vocab[el] for el in targets} self.assertEqual({el['''token'''] for el in outputs} ,__lowercase ) snake_case__ : Any = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['''token_str'''] for el in outputs} ,set(__lowercase ) ) # Call argument snake_case__ : str = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase ) snake_case__ : int = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets=__lowercase ) self.assertEqual( __lowercase ,[ {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, ] ,) snake_case__ : str = {vocab[el] for el in targets} self.assertEqual({el['''token'''] for el in outputs} ,__lowercase ) snake_case__ : Optional[Any] = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['''token_str'''] for el in outputs} ,set(__lowercase ) ) # Score equivalence snake_case__ : Dict = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets=__lowercase ) snake_case__ : Union[str, Any] = [top_mask['''token_str'''] for top_mask in outputs] snake_case__ : Tuple = [top_mask['''score'''] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(__lowercase ) == set(__lowercase ): snake_case__ : List[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets=__lowercase ) snake_case__ : int = [top_mask['''score'''] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(__lowercase ) ,nested_simplify(__lowercase ) ) # Raises with invalid with self.assertRaises(__lowercase ): snake_case__ : List[str] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised if "" not in tokenizer.get_vocab(): with self.assertRaises(__lowercase ): snake_case__ : List[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets=[''''''] ) with self.assertRaises(__lowercase ): snake_case__ : Optional[int] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets='''''' ) def __lowerCamelCase ( self :Any ,__lowercase :Union[str, Any] ,__lowercase :Dict ): snake_case__ : int = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase ,top_k=2 ) snake_case__ : Tuple = fill_masker(F"""This is a {tokenizer.mask_token}""" ) self.assertEqual( __lowercase ,[ {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, ] ,) snake_case__ : Any = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase ) snake_case__ : Optional[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,top_k=2 ) self.assertEqual( __lowercase ,[ {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, ] ,) self.assertEqual(nested_simplify(__lowercase ) ,nested_simplify(__lowercase ) ) def __lowerCamelCase ( self :List[Any] ,__lowercase :Tuple ,__lowercase :str ): snake_case__ : Optional[int] = tokenizer.get_vocab() snake_case__ : int = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase ) # top_k=2, ntargets=3 snake_case__ : int = sorted(vocab.keys() )[:3] snake_case__ : List[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,top_k=2 ,targets=__lowercase ) # If we use the most probable targets, and filter differently, we should still # have the same results snake_case__ : Dict = [el['''token_str'''] for el in sorted(__lowercase ,key=lambda x: x["score"] ,reverse=__lowercase )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__lowercase ).issubset(__lowercase ): snake_case__ : List[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,top_k=3 ,targets=__lowercase ) # They should yield exactly the same result self.assertEqual(nested_simplify(__lowercase ) ,nested_simplify(__lowercase ) ) def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :Dict ,__lowercase :Dict ): snake_case__ : Union[str, Any] = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase ) snake_case__ : str = tokenizer.get_vocab() # String duplicates + id duplicates snake_case__ : int = sorted(vocab.keys() )[:3] snake_case__ : Optional[Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]] snake_case__ : Optional[Any] = fill_masker(F"""My name is {tokenizer.mask_token}""" ,targets=__lowercase ,top_k=1_0 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(__lowercase ) ,3 ) def __lowerCamelCase ( self :Optional[Any] ,__lowercase :List[Any] ,__lowercase :Optional[Any] ): snake_case__ : Any = FillMaskPipeline(model=__lowercase ,tokenizer=__lowercase ) snake_case__ : Tuple = fill_masker( F"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" ,top_k=2 ) self.assertEqual( __lowercase ,[ [ {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, ], [ {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, ], [ {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, {'''sequence''': ANY(__lowercase ), '''score''': ANY(__lowercase ), '''token''': ANY(__lowercase ), '''token_str''': ANY(__lowercase )}, ], ] ,)
230
0
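For orientation, a minimal usage sketch of the fill-mask pipeline exercised in the tests above; the checkpoint and sentences are illustrative, and scores depend on the model.

from transformers import pipeline

# Top-k completion for one masked token; each result is a dict with
# `sequence`, `score`, `token`, and `token_str`, as asserted in the tests.
unmasker = pipeline("fill-mask", model="distilroberta-base", top_k=2)
print(unmasker("The largest city in France is <mask>."))

# Restrict candidates to explicit targets; the leading space matches BPE vocabulary entries.
print(unmasker("My name is <mask>.", targets=[" Patrick", " Clara"]))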
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) __A : Dict = logging.getLogger(__name__) if __name__ == "__main__": __A : str = argparse.ArgumentParser( description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)" ) parser.add_argument( "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset." ) parser.add_argument( "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file." ) parser.add_argument("--vocab_size", default=30522, type=int) __A : Dict = parser.parse_args() logger.info(f'''Loading data from {args.data_file}''') with open(args.data_file, "rb") as fp: __A : List[str] = pickle.load(fp) logger.info("Counting occurrences for MLM.") __A : Dict = Counter() for tk_ids in data: counter.update(tk_ids) __A : Any = [0] * args.vocab_size for k, v in counter.items(): __A : Optional[Any] = v logger.info(f'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, "wb") as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
366
"""simple docstring""" import math def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 0 , _SCREAMING_SNAKE_CASE : int = 0 ): '''simple docstring''' _UpperCAmelCase = end or len(_SCREAMING_SNAKE_CASE ) for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = i _UpperCAmelCase = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: _UpperCAmelCase = array[temp_index - 1] temp_index -= 1 _UpperCAmelCase = temp_index_value return array def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): # Max Heap '''simple docstring''' _UpperCAmelCase = index _UpperCAmelCase = 2 * index + 1 # Left Node _UpperCAmelCase = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: _UpperCAmelCase = left_index if right_index < heap_size and array[largest] < array[right_index]: _UpperCAmelCase = right_index if largest != index: _UpperCAmelCase , _UpperCAmelCase = array[largest], array[index] heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : list ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) for i in range(n // 2 , -1 , -1 ): heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for i in range(n - 1 , 0 , -1 ): _UpperCAmelCase , _UpperCAmelCase = array[0], array[i] heapify(_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE ) return array def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = low _UpperCAmelCase = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i _UpperCAmelCase , _UpperCAmelCase = array[j], array[i] i += 1 def lowercase ( _SCREAMING_SNAKE_CASE : list ): '''simple docstring''' if len(_SCREAMING_SNAKE_CASE ) == 0: return array _UpperCAmelCase = 2 * math.ceil(math.loga(len(_SCREAMING_SNAKE_CASE ) ) ) _UpperCAmelCase = 16 return intro_sort(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' while end - start > size_threshold: if max_depth == 0: return heap_sort(_SCREAMING_SNAKE_CASE ) max_depth -= 1 _UpperCAmelCase = median_of_a(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 ) _UpperCAmelCase = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) intro_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = p return insertion_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod() __A : List[str] = input("Enter 
numbers separated by a comma : ").strip() __A : Optional[Any] = [float(item) for item in user_input.split(",")] print(sort(unsorted))
326
0
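The counting step in the token-counts script above reduces to a Counter pass plus a dense expansion; here is a self-contained sketch with toy data standing in for the pickled dump (the data and vocab size are assumptions for the example):

from collections import Counter

data = [[5, 7, 5], [7, 9]]  # toy stand-in for the binarized dataset
vocab_size = 10             # assumed small vocab for the sketch

counter = Counter()
for tk_ids in data:
    counter.update(tk_ids)  # tally every token id across all sequences

counts = [0] * vocab_size
for k, v in counter.items():
    counts[k] = v           # expand sparse counts into a dense id-indexed list
print(counts)  # [0, 0, 0, 0, 0, 2, 0, 2, 0, 1]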
import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py SCREAMING_SNAKE_CASE : int = "src/transformers" SCREAMING_SNAKE_CASE : Any = "docs/source/en" SCREAMING_SNAKE_CASE : str = "." def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]: with open(lowerCamelCase_ , 'r' , encoding='utf-8' , newline='\n' ) as f: _lowercase : Dict = f.readlines() # Find the start prompt. _lowercase : int = 0 while not lines[start_index].startswith(lowerCamelCase_ ): start_index += 1 start_index += 1 _lowercase : Any = start_index while not lines[end_index].startswith(lowerCamelCase_ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | SCREAMING_SNAKE_CASE : Dict = "Model|Encoder|Decoder|ForConditionalGeneration" # Regexes that match TF/Flax/PT model names. SCREAMING_SNAKE_CASE : Dict = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") SCREAMING_SNAKE_CASE : int = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch after the two previous regexes. SCREAMING_SNAKE_CASE : List[Any] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # This is to make sure the transformers module imported is the one in the repo. SCREAMING_SNAKE_CASE : str = direct_transformers_import(TRANSFORMERS_PATH) def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]: _lowercase : List[Any] = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , lowerCamelCase_ ) return [m.group(0 ) for m in matches] def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]: _lowercase : Optional[int] = 2 if text == '✅' or text == '❌' else len(lowerCamelCase_ ) _lowercase : Union[str, Any] = (width - text_length) // 2 _lowercase : str = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def UpperCamelCase_( ) -> List[Any]: _lowercase : Any = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES _lowercase : str = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } _lowercase : str = {name: config.replace('Config' , '' ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. _lowercase : Optional[Any] = collections.defaultdict(lowerCamelCase_ ) _lowercase : Tuple = collections.defaultdict(lowerCamelCase_ ) _lowercase : int = collections.defaultdict(lowerCamelCase_ ) _lowercase : str = collections.defaultdict(lowerCamelCase_ ) _lowercase : str = collections.defaultdict(lowerCamelCase_ ) # Let's look through all transformers objects (once).
for attr_name in dir(lowerCamelCase_ ): _lowercase : Optional[int] = None if attr_name.endswith('Tokenizer' ): _lowercase : str = slow_tokenizers _lowercase : Any = attr_name[:-9] elif attr_name.endswith('TokenizerFast' ): _lowercase : Union[str, Any] = fast_tokenizers _lowercase : str = attr_name[:-13] elif _re_tf_models.match(lowerCamelCase_ ) is not None: _lowercase : str = tf_models _lowercase : Dict = _re_tf_models.match(lowerCamelCase_ ).groups()[0] elif _re_flax_models.match(lowerCamelCase_ ) is not None: _lowercase : Union[str, Any] = flax_models _lowercase : List[Any] = _re_flax_models.match(lowerCamelCase_ ).groups()[0] elif _re_pt_models.match(lowerCamelCase_ ) is not None: _lowercase : Any = pt_models _lowercase : Dict = _re_pt_models.match(lowerCamelCase_ ).groups()[0] if lookup_dict is not None: while len(lowerCamelCase_ ) > 0: if attr_name in model_name_to_prefix.values(): _lowercase : Dict = True break # Try again after removing the last word in the name _lowercase : List[str] = ''.join(camel_case_split(lowerCamelCase_ )[:-1] ) # Let's build that table! _lowercase : Optional[Any] = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) _lowercase : Tuple = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support'] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). _lowercase : List[Any] = [len(lowerCamelCase_ ) + 2 for c in columns] _lowercase : str = max([len(lowerCamelCase_ ) for name in model_names] ) + 2 # Build the table per se _lowercase : int = '|' + '|'.join([_center_text(lowerCamelCase_ , lowerCamelCase_ ) for c, w in zip(lowerCamelCase_ , lowerCamelCase_ )] ) + '|\n' # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths] ) + "|\n" _lowercase : List[str] = {True: '✅', False: '❌'} for name in model_names: _lowercase : List[str] = model_name_to_prefix[name] _lowercase : Dict = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(lowerCamelCase_ , lowerCamelCase_ ) for l, w in zip(lowerCamelCase_ , lowerCamelCase_ )] ) + "|\n" return table def UpperCamelCase_( lowerCamelCase_=False ) -> List[Any]: _lowercase , _lowercase , _lowercase , _lowercase : Dict = _find_text_in_file( filename=os.path.join(lowerCamelCase_ , 'index.md' ) , start_prompt='<!--This table is updated automatically from the auto modules' , end_prompt='<!-- End table-->' , ) _lowercase : Union[str, Any] = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(lowerCamelCase_ , 'index.md' ) , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( 'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") SCREAMING_SNAKE_CASE : Tuple = parser.parse_args() check_model_table(args.fix_and_overwrite)
21
'''simple docstring''' import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def lowerCamelCase__ ( _A , _A , _A ): if isinstance(_A , torch.Tensor ): return image elif isinstance(_A , PIL.Image.Image ): a : Any = [image] if isinstance(image[0] , PIL.Image.Image ): a : List[str] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image] a : int = np.concatenate(_A , axis=0 ) a : int = np.array(_A ).astype(np.floataa ) / 255.0 a : str = image.transpose(0 , 3 , 1 , 2 ) a : str = 2.0 * image - 1.0 a : Optional[int] = torch.from_numpy(_A ) elif isinstance(image[0] , torch.Tensor ): a : Optional[Any] = torch.cat(_A , dim=0 ) return image def lowerCamelCase__ ( _A , _A , _A , _A=0.9995 ): if not isinstance(_A , np.ndarray ): a : Dict = True a : Optional[Any] = va.device a : Optional[int] = va.cpu().numpy() a : Union[str, Any] = va.cpu().numpy() a : Any = np.sum(va * va / (np.linalg.norm(_A ) * np.linalg.norm(_A )) ) if np.abs(_A ) > DOT_THRESHOLD: a : Any = (1 - t) * va + t * va else: a : Any = np.arccos(_A ) a : Tuple = np.sin(_A ) a : Optional[Any] = theta_a * t a : List[Any] = np.sin(_A ) a : Dict = np.sin(theta_a - theta_t ) / sin_theta_a a : int = sin_theta_t / sin_theta_a a : Any = sa * va + sa * va if inputs_are_torch: a : Dict = torch.from_numpy(_A ).to(_A ) return va def lowerCamelCase__ ( _A , _A ): a : Optional[int] = F.normalize(_A , dim=-1 ) a : str = F.normalize(_A , dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def lowerCamelCase__ ( _A , _A ): for param in model.parameters(): a : int = value class a__( lowerCamelCase__ ): def __init__( self : str , __snake_case : AutoencoderKL , __snake_case : CLIPTextModel , __snake_case : CLIPModel , __snake_case : CLIPTokenizer , __snake_case : UNetaDConditionModel , __snake_case : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , __snake_case : CLIPFeatureExtractor , __snake_case : List[str]=None , __snake_case : List[str]=None , __snake_case : List[Any]=None , ): super().__init__() self.register_modules( vae=__snake_case , text_encoder=__snake_case , clip_model=__snake_case , tokenizer=__snake_case , unet=__snake_case , scheduler=__snake_case , feature_extractor=__snake_case , coca_model=__snake_case , coca_tokenizer=__snake_case , coca_transform=__snake_case , ) a : Optional[Any] = ( feature_extractor.size if isinstance(feature_extractor.size , __snake_case ) else feature_extractor.size['shortest_edge'] ) a : Optional[int] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , __snake_case ) set_requires_grad(self.clip_model , __snake_case ) def lowercase_ ( self : int , __snake_case : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory a : Union[str, Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__snake_case ) def 
lowercase_ ( self : Union[str, Any] ): self.enable_attention_slicing(__snake_case ) def lowercase_ ( self : Optional[Any] ): set_requires_grad(self.vae , __snake_case ) def lowercase_ ( self : Tuple ): set_requires_grad(self.vae , __snake_case ) def lowercase_ ( self : int ): set_requires_grad(self.unet , __snake_case ) def lowercase_ ( self : Union[str, Any] ): set_requires_grad(self.unet , __snake_case ) def lowercase_ ( self : int , __snake_case : Dict , __snake_case : str , __snake_case : Optional[int] ): # get the original timestep using init_timestep a : Optional[Any] = min(int(num_inference_steps * strength ) , __snake_case ) a : Union[str, Any] = max(num_inference_steps - init_timestep , 0 ) a : List[Any] = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def lowercase_ ( self : Dict , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Optional[Any]=None ): if not isinstance(__snake_case , torch.Tensor ): raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(__snake_case )}""" ) a : Optional[Any] = image.to(device=__snake_case , dtype=__snake_case ) if isinstance(__snake_case , __snake_case ): a : Optional[int] = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__snake_case ) ] a : Optional[Any] = torch.cat(__snake_case , dim=0 ) else: a : Union[str, Any] = self.vae.encode(__snake_case ).latent_dist.sample(__snake_case ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor a : List[str] = 0.18215 * init_latents a : str = init_latents.repeat_interleave(__snake_case , dim=0 ) a : Dict = randn_tensor(init_latents.shape , generator=__snake_case , device=__snake_case , dtype=__snake_case ) # get latents a : Dict = self.scheduler.add_noise(__snake_case , __snake_case , __snake_case ) a : int = init_latents return latents def lowercase_ ( self : List[str] , __snake_case : Dict ): a : List[Any] = self.coca_transform(__snake_case ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): a : Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) a : Union[str, Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' ) def lowercase_ ( self : Tuple , __snake_case : Any , __snake_case : Optional[Any] ): a : List[Any] = self.feature_extractor.preprocess(__snake_case ) a : Optional[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half() a : int = self.clip_model.get_image_features(__snake_case ) a : str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case ) a : Tuple = image_embeddings_clip.repeat_interleave(__snake_case , dim=0 ) return image_embeddings_clip @torch.enable_grad() def lowercase_ ( self : Tuple , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : List[Any] , ): a : Optional[Any] = latents.detach().requires_grad_() a : List[Any] = self.scheduler.scale_model_input(__snake_case , __snake_case ) # predict the noise residual a : Any = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, 
DPMSolverMultistepScheduler) ): a : int = self.scheduler.alphas_cumprod[timestep] a : Any = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf a : List[str] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 a : Tuple = torch.sqrt(__snake_case ) a : str = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , __snake_case ): a : List[Any] = self.scheduler.sigmas[index] a : Optional[int] = latents - sigma * noise_pred else: raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor a : Union[str, Any] = 1 / 0.18215 * sample a : str = self.vae.decode(__snake_case ).sample a : List[Any] = (image / 2 + 0.5).clamp(0 , 1 ) a : Tuple = transforms.Resize(self.feature_extractor_size )(__snake_case ) a : List[str] = self.normalize(__snake_case ).to(latents.dtype ) a : List[str] = self.clip_model.get_image_features(__snake_case ) a : Tuple = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case ) a : int = spherical_dist_loss(__snake_case , __snake_case ).mean() * clip_guidance_scale a : List[str] = -torch.autograd.grad(__snake_case , __snake_case )[0] if isinstance(self.scheduler , __snake_case ): a : List[Any] = latents.detach() + grads * (sigma**2) a : Optional[int] = noise_pred_original else: a : List[Any] = noise_pred_original - torch.sqrt(__snake_case ) * grads return noise_pred, latents @torch.no_grad() def __call__( self : Optional[int] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Optional[str] = None , __snake_case : Optional[str] = None , __snake_case : Optional[int] = 5_12 , __snake_case : Optional[int] = 5_12 , __snake_case : float = 0.6 , __snake_case : Optional[int] = 50 , __snake_case : Optional[float] = 7.5 , __snake_case : Optional[int] = 1 , __snake_case : float = 0.0 , __snake_case : Optional[float] = 1_00 , __snake_case : Optional[torch.Generator] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , __snake_case : float = 0.8 , __snake_case : float = 0.1 , __snake_case : float = 0.1 , ): if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size: raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(__snake_case )} generators.""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if isinstance(__snake_case , torch.Generator ) and batch_size > 1: a : Dict = [generator] + [None] * (batch_size - 1) a : Any = [ ('model', self.coca_model is None), ('tokenizer', self.coca_tokenizer is None), ('transform', self.coca_transform is None), ] a : List[str] = [x[0] for x in coca_is_none if x[1]] a : List[str] = ', '.join(__snake_case ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(__snake_case ): raise ValueError( F"""Content prompt is None and CoCa [{coca_is_none_str}] is None.""" F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) a : int = self.get_image_description(__snake_case ) if style_prompt is None: if len(__snake_case ): raise ValueError( F"""Style prompt is None and CoCa [{coca_is_none_str}] is None.""" F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) 
a : Union[str, Any] = self.get_image_description(__snake_case ) # get prompt text embeddings for content and style a : Optional[Any] = self.tokenizer( __snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , ) a : Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] a : Dict = self.tokenizer( __snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , ) a : Dict = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] a : Any = slerp(__snake_case , __snake_case , __snake_case ) # duplicate text embeddings for each generation per prompt a : Optional[Any] = text_embeddings.repeat_interleave(__snake_case , dim=0 ) # set timesteps a : int = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) a : Any = {} if accepts_offset: a : Optional[Any] = 1 self.scheduler.set_timesteps(__snake_case , **__snake_case ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) a , a : Tuple = self.get_timesteps(__snake_case , __snake_case , self.device ) a : Optional[int] = timesteps[:1].repeat(__snake_case ) # Preprocess image a : Optional[Any] = preprocess(__snake_case , __snake_case , __snake_case ) a : List[Any] = self.prepare_latents( __snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case ) a : str = preprocess(__snake_case , __snake_case , __snake_case ) a : Union[str, Any] = self.prepare_latents( __snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case ) a : Union[str, Any] = slerp(__snake_case , __snake_case , __snake_case ) if clip_guidance_scale > 0: a : Dict = self.get_clip_image_embeddings(__snake_case , __snake_case ) a : int = self.get_clip_image_embeddings(__snake_case , __snake_case ) a : List[str] = slerp( __snake_case , __snake_case , __snake_case ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. a : int = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: a : Any = content_text_input.input_ids.shape[-1] a : List[Any] = self.tokenizer([''] , padding='max_length' , max_length=__snake_case , return_tensors='pt' ) a : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt a : Dict = uncond_embeddings.repeat_interleave(__snake_case , dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes a : Any = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
a : List[str] = (batch_size, self.unet.config.in_channels, height // 8, width // 8) a : List[str] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps a : int = torch.randn(__snake_case , generator=__snake_case , device='cpu' , dtype=__snake_case ).to( self.device ) else: a : Optional[int] = torch.randn(__snake_case , generator=__snake_case , device=self.device , dtype=__snake_case ) else: if latents.shape != latents_shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) a : List[str] = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler a : Any = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] a : Optional[Any] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) a : Union[str, Any] = {} if accepts_eta: a : List[str] = eta # check if the scheduler accepts generator a : List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: a : Any = generator with self.progress_bar(total=__snake_case ): for i, t in enumerate(__snake_case ): # expand the latents if we are doing classifier free guidance a : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents a : Dict = self.scheduler.scale_model_input(__snake_case , __snake_case ) # predict the noise residual a : List[Any] = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample # perform classifier free guidance if do_classifier_free_guidance: a , a : List[str] = noise_pred.chunk(2 ) a : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: a : Optional[Any] = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) a , a : Union[str, Any] = self.cond_fn( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) # compute the previous noisy sample x_t -> x_t-1 a : Any = self.scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor a : Tuple = 1 / 0.18215 * latents a : Optional[int] = self.vae.decode(__snake_case ).sample a : List[str] = (image / 2 + 0.5).clamp(0 , 1 ) a : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": a : str = self.numpy_to_pil(__snake_case ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=__snake_case , nsfw_content_detected=__snake_case )
297
0
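The slerp helper defined in the pipeline above blends two embeddings along the unit sphere and falls back to linear interpolation when they are nearly parallel; below is a minimal numpy sketch of the same formula (the threshold and test vectors are illustrative):

import numpy as np

def slerp_np(t, v0, v1, dot_threshold=0.9995):
    # cosine of the angle between the (normalized) vectors
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > dot_threshold:  # nearly parallel: plain lerp is numerically safer
        return (1 - t) * v0 + t * v1
    theta = np.arccos(dot)
    return (np.sin((1 - t) * theta) * v0 + np.sin(t * theta) * v1) / np.sin(theta)

a, b = np.array([1.0, 0.0]), np.array([0.0, 1.0])
print(slerp_np(0.5, a, b))  # ~[0.7071, 0.7071], halfway along the arc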
"""simple docstring""" import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_0 ) -> int: __lowerCAmelCase: Optional[Any] = [] for _ in range(__SCREAMING_SNAKE_CASE ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_0 ) -> Optional[int]: __lowerCAmelCase: List[Any] = [] for step in range(__SCREAMING_SNAKE_CASE ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: __lowerCAmelCase: str = os.path.join(__SCREAMING_SNAKE_CASE , "schedule.bin" ) torch.save(scheduler.state_dict() , __SCREAMING_SNAKE_CASE ) __lowerCAmelCase: Optional[int] = torch.load(__SCREAMING_SNAKE_CASE ) scheduler.load_state_dict(__SCREAMING_SNAKE_CASE ) return lrs @require_torch class snake_case ( unittest.TestCase ): def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any])-> Dict: '''simple docstring''' self.assertEqual(len(UpperCamelCase__) , len(UpperCamelCase__)) for a, b in zip(UpperCamelCase__ , UpperCamelCase__): self.assertAlmostEqual(UpperCamelCase__ , UpperCamelCase__ , delta=UpperCamelCase__) def lowercase_ ( self : List[str])-> Tuple: '''simple docstring''' __lowerCAmelCase: int = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase__) __lowerCAmelCase: Union[str, Any] = torch.tensor([0.4, 0.2, -0.5]) __lowerCAmelCase: str = nn.MSELoss() # No warmup, constant schedule, no gradient clipping __lowerCAmelCase: Optional[int] = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0) for _ in range(1_0_0): __lowerCAmelCase: Optional[Any] = criterion(UpperCamelCase__ , UpperCamelCase__) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2) def lowercase_ ( self : str)-> Optional[Any]: '''simple docstring''' __lowerCAmelCase: Optional[Any] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase__) __lowerCAmelCase: Tuple = torch.tensor([0.4, 0.2, -0.5]) __lowerCAmelCase: str = nn.MSELoss() # No warmup, constant schedule, no gradient clipping __lowerCAmelCase: Any = Adafactor( params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCamelCase__ , weight_decay=0.0 , relative_step=UpperCamelCase__ , scale_parameter=UpperCamelCase__ , warmup_init=UpperCamelCase__ , ) for _ in range(1_0_0_0): __lowerCAmelCase: Optional[int] = criterion(UpperCamelCase__ , UpperCamelCase__) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2) @require_torch class snake_case ( unittest.TestCase ): SCREAMING_SNAKE_CASE_ : List[str] = nn.Linear(50, 50 ) if is_torch_available() else None SCREAMING_SNAKE_CASE_ : Tuple = AdamW(m.parameters(), lr=10.0 ) if is_torch_available() else None SCREAMING_SNAKE_CASE_ : Dict = 10 def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int]=None)-> Dict: '''simple docstring''' self.assertEqual(len(UpperCamelCase__) , len(UpperCamelCase__)) for a, b in zip(UpperCamelCase__ , UpperCamelCase__): self.assertAlmostEqual(UpperCamelCase__ , UpperCamelCase__ , delta=UpperCamelCase__ , msg=UpperCamelCase__) def lowercase_ ( self : Dict)-> Tuple: '''simple docstring''' __lowerCAmelCase: List[Any] = {"num_warmup_steps": 2, "num_training_steps": 1_0} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) __lowerCAmelCase: Tuple = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): __lowerCAmelCase , __lowerCAmelCase: Union[str, Any] = data __lowerCAmelCase: Tuple = scheduler_func(self.optimizer , **UpperCamelCase__) self.assertEqual(len([scheduler.get_lr()[0]]) , 1) __lowerCAmelCase: Optional[int] = unwrap_schedule(UpperCamelCase__ , self.num_steps) self.assertListAlmostEqual( UpperCamelCase__ , UpperCamelCase__ , tol=1e-2 , msg=f"failed for {scheduler_func} in normal scheduler" , ) __lowerCAmelCase: Union[str, Any] = scheduler_func(self.optimizer , **UpperCamelCase__) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase__) # wrap to test picklability of the schedule __lowerCAmelCase: Dict = unwrap_and_save_reload_schedule(UpperCamelCase__ , self.num_steps) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ , msg=f"failed for {scheduler_func} in save and reload") class snake_case : def __init__( self : Tuple , UpperCamelCase__ : Union[str, Any])-> List[Any]: '''simple docstring''' __lowerCAmelCase: List[Any] = fn def __call__( self : Tuple , *UpperCamelCase__ : int , **UpperCamelCase__ : List[str])-> Any: '''simple docstring''' return self.fn(*UpperCamelCase__ , **UpperCamelCase__) @classmethod def lowercase_ ( self : List[Any] , UpperCamelCase__ : Dict)-> Optional[int]: '''simple docstring''' __lowerCAmelCase: Any = list(map(self , scheduler.lr_lambdas))
108
"""simple docstring""" from math import ceil def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str: __lowerCAmelCase: Tuple = list(range(0 , __SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase: Optional[Any] = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check __lowerCAmelCase: List[Any] = [] for i in device_map_blocks: if device_map_blocks.count(__SCREAMING_SNAKE_CASE ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(__SCREAMING_SNAKE_CASE ) # Missing blocks __lowerCAmelCase: Optional[Any] = [i for i in blocks if i not in device_map_blocks] __lowerCAmelCase: List[Any] = [i for i in device_map_blocks if i not in blocks] if len(__SCREAMING_SNAKE_CASE ) != 0: raise ValueError( "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device." " These attention blocks were specified more than once: " + str(__SCREAMING_SNAKE_CASE ) ) if len(__SCREAMING_SNAKE_CASE ) != 0: raise ValueError( "There are attention blocks for this model that are not specified in the device_map. Add these attention " "blocks to a device on the device_map: " + str(__SCREAMING_SNAKE_CASE ) ) if len(__SCREAMING_SNAKE_CASE ) != 0: raise ValueError( "The device_map contains more attention blocks than this model has. Remove these from the device_map:" + str(__SCREAMING_SNAKE_CASE ) ) def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str: __lowerCAmelCase: List[Any] = list(range(__SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase: Dict = int(ceil(n_layers / len(__SCREAMING_SNAKE_CASE ) ) ) __lowerCAmelCase: Union[str, Any] = [layers[i : i + n_blocks] for i in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )] return dict(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
108
1
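The expected learning-rate tables in the scheduler tests above come straight from the schedule definitions; for example, the `get_linear_schedule_with_warmup` row is linear warmup over 2 steps followed by linear decay to step 10. A small hedged sketch of that one schedule (the layer sizes are arbitrary):

from torch import nn
from transformers import AdamW, get_linear_schedule_with_warmup

model = nn.Linear(4, 4)
optimizer = AdamW(model.parameters(), lr=10.0)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)

lrs = []
for _ in range(10):
    lrs.append(scheduler.get_lr()[0])  # record, then advance, as the test helper does
    scheduler.step()
print(lrs)  # [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]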
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


A__: List[str] = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__: List[Any] = ['''DeiTFeatureExtractor''']
    A__: Dict = ['''DeiTImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__: Optional[Any] = [
        '''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''DeiTForImageClassification''',
        '''DeiTForImageClassificationWithTeacher''',
        '''DeiTForMaskedImageModeling''',
        '''DeiTModel''',
        '''DeiTPreTrainedModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__: Optional[int] = [
        '''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFDeiTForImageClassification''',
        '''TFDeiTForImageClassificationWithTeacher''',
        '''TFDeiTForMaskedImageModeling''',
        '''TFDeiTModel''',
        '''TFDeiTPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    A__: Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
149
from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _a ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__): """simple docstring""" UpperCamelCase__ = [r"""h\.\d+\.attn\.bias""", r"""h\.\d+\.attn\.masked_bias"""] @register_to_config def __init__( self: Tuple , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: int = 5_0257 , __lowerCamelCase: int = 1024 , __lowerCamelCase: int = 768 , __lowerCamelCase: int = 12 , __lowerCamelCase: int = 12 , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: str = "gelu_new" , __lowerCamelCase: float = 0.1 , __lowerCamelCase: float = 0.1 , __lowerCamelCase: float = 0.1 , __lowerCamelCase: float = 1e-5 , __lowerCamelCase: float = 0.02 , __lowerCamelCase: bool = True , __lowerCamelCase: bool = True , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , ): '''simple docstring''' super().__init__() UpperCamelCase__: Union[str, Any] = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( F"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and" F" `n_embd`: {n_embd} are not equal." ) UpperCamelCase__: List[str] = prefix_inner_dim UpperCamelCase__: Optional[int] = prefix_hidden_dim UpperCamelCase__: Dict = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) UpperCamelCase__: Tuple = ( nn.Linear(self.prefix_hidden_dim , __lowerCamelCase ) if self.prefix_hidden_dim is not None else nn.Identity() ) UpperCamelCase__: List[str] = GPTaConfig( vocab_size=__lowerCamelCase , n_positions=__lowerCamelCase , n_embd=__lowerCamelCase , n_layer=__lowerCamelCase , n_head=__lowerCamelCase , n_inner=__lowerCamelCase , activation_function=__lowerCamelCase , resid_pdrop=__lowerCamelCase , embd_pdrop=__lowerCamelCase , attn_pdrop=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase , initializer_range=__lowerCamelCase , scale_attn_weights=__lowerCamelCase , use_cache=__lowerCamelCase , scale_attn_by_inverse_layer_idx=__lowerCamelCase , reorder_and_upcast_attn=__lowerCamelCase , ) UpperCamelCase__: Any = GPTaLMHeadModel(__lowerCamelCase ) def UpperCAmelCase_ ( self: int , __lowerCamelCase: torch.Tensor , __lowerCamelCase: torch.Tensor , __lowerCamelCase: Optional[torch.Tensor] = None , __lowerCamelCase: Optional[torch.Tensor] = None , ): '''simple docstring''' UpperCamelCase__: Optional[int] = self.transformer.transformer.wte(__lowerCamelCase ) UpperCamelCase__: Dict = self.encode_prefix(__lowerCamelCase ) UpperCamelCase__: List[Any] = self.decode_prefix(__lowerCamelCase ) UpperCamelCase__: str = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: UpperCamelCase__: Union[str, Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) UpperCamelCase__: Any = torch.cat((dummy_token, input_ids) , dim=1 ) UpperCamelCase__: str = self.transformer(inputs_embeds=__lowerCamelCase , labels=__lowerCamelCase , attention_mask=__lowerCamelCase ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def UpperCAmelCase_ ( self: Any , __lowerCamelCase: int , __lowerCamelCase: torch.device ): '''simple docstring''' return torch.zeros(__lowerCamelCase , self.prefix_length , dtype=torch.intaa 
, device=__lowerCamelCase ) def UpperCAmelCase_ ( self: Union[str, Any] , __lowerCamelCase: List[Any] ): '''simple docstring''' return self.encode_prefix(__lowerCamelCase ) @torch.no_grad() def UpperCAmelCase_ ( self: List[str] , __lowerCamelCase: Dict , __lowerCamelCase: Any , __lowerCamelCase: List[str] ): '''simple docstring''' UpperCamelCase__: Any = torch.split(__lowerCamelCase , 1 , dim=0 ) UpperCamelCase__: Dict = [] UpperCamelCase__: Union[str, Any] = [] for feature in features: UpperCamelCase__: Tuple = self.decode_prefix(feature.to(__lowerCamelCase ) ) # back to the clip feature # Only support beam search for now UpperCamelCase__ , UpperCamelCase__: List[Any] = self.generate_beam( input_embeds=__lowerCamelCase , device=__lowerCamelCase , eos_token_id=__lowerCamelCase ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) UpperCamelCase__: str = torch.stack(__lowerCamelCase ) UpperCamelCase__: str = torch.stack(__lowerCamelCase ) return generated_tokens, generated_seq_lengths @torch.no_grad() def UpperCAmelCase_ ( self: str , __lowerCamelCase: Optional[Any]=None , __lowerCamelCase: Optional[int]=None , __lowerCamelCase: Dict=None , __lowerCamelCase: int = 5 , __lowerCamelCase: int = 67 , __lowerCamelCase: float = 1.0 , __lowerCamelCase: Optional[int] = None , ): '''simple docstring''' UpperCamelCase__: Tuple = eos_token_id UpperCamelCase__: List[str] = None UpperCamelCase__: Any = None UpperCamelCase__: Optional[int] = torch.ones(__lowerCamelCase , device=__lowerCamelCase , dtype=torch.int ) UpperCamelCase__: Dict = torch.zeros(__lowerCamelCase , device=__lowerCamelCase , dtype=torch.bool ) if input_embeds is not None: UpperCamelCase__: Dict = input_embeds else: UpperCamelCase__: Optional[int] = self.transformer.transformer.wte(__lowerCamelCase ) for i in range(__lowerCamelCase ): UpperCamelCase__: Union[str, Any] = self.transformer(inputs_embeds=__lowerCamelCase ) UpperCamelCase__: Tuple = outputs.logits UpperCamelCase__: Dict = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) UpperCamelCase__: List[str] = logits.softmax(-1 ).log() if scores is None: UpperCamelCase__ , UpperCamelCase__: Union[str, Any] = logits.topk(__lowerCamelCase , -1 ) UpperCamelCase__: str = generated.expand(__lowerCamelCase , *generated.shape[1:] ) UpperCamelCase__ , UpperCamelCase__: Dict = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: UpperCamelCase__: int = next_tokens else: UpperCamelCase__: Optional[int] = tokens.expand(__lowerCamelCase , *tokens.shape[1:] ) UpperCamelCase__: str = torch.cat((tokens, next_tokens) , dim=1 ) else: UpperCamelCase__: Optional[Any] = -float(np.inf ) UpperCamelCase__: Any = 0 UpperCamelCase__: List[str] = scores[:, None] + logits seq_lengths[~is_stopped] += 1 UpperCamelCase__: Any = scores_sum / seq_lengths[:, None] UpperCamelCase__ , UpperCamelCase__: Union[str, Any] = scores_sum_average.view(-1 ).topk(__lowerCamelCase , -1 ) UpperCamelCase__: Dict = next_tokens // scores_sum.shape[1] UpperCamelCase__: Optional[int] = seq_lengths[next_tokens_source] UpperCamelCase__: int = next_tokens % scores_sum.shape[1] UpperCamelCase__: Optional[int] = next_tokens.unsqueeze(1 ) UpperCamelCase__: Tuple = tokens[next_tokens_source] UpperCamelCase__: Tuple = torch.cat((tokens, next_tokens) , dim=1 ) UpperCamelCase__: List[Any] = generated[next_tokens_source] UpperCamelCase__: int = scores_sum_average * seq_lengths UpperCamelCase__: Dict = is_stopped[next_tokens_source] UpperCamelCase__: List[str] = 
self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) UpperCamelCase__: Any = torch.cat((generated, next_token_embed) , dim=1 ) UpperCamelCase__: Union[str, Any] = is_stopped + next_tokens.eq(__lowerCamelCase ).squeeze() if is_stopped.all(): break UpperCamelCase__: Optional[Any] = scores / seq_lengths UpperCamelCase__: int = scores.argsort(descending=__lowerCamelCase ) # tokens tensors are already padded to max_seq_length UpperCamelCase__: Dict = [tokens[i] for i in order] UpperCamelCase__: Any = torch.stack(__lowerCamelCase , dim=0 ) UpperCamelCase__: int = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
149
1
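The decoder above conditions GPT-2 on an external feature by projecting it into the embedding width and concatenating it in front of the token embeddings; below is a stripped-down sketch of that wiring (the dimensions and the single linear projection are simplifying assumptions, since the model above can also route through an intermediate hidden dim):

import torch
from torch import nn

prefix_inner_dim, n_embd, prefix_length = 512, 768, 10
encode_prefix = nn.Linear(prefix_inner_dim, n_embd)  # stand-in for encode/decode_prefix

feature = torch.randn(1, prefix_length, prefix_inner_dim)  # e.g. a CLIP feature
token_embeds = torch.randn(1, 20, n_embd)                  # would be wte(input_ids)

prefix_embeds = encode_prefix(feature)
hidden = torch.cat((prefix_embeds, token_embeds), dim=1)   # fed to GPT-2 as inputs_embeds
print(hidden.shape)  # torch.Size([1, 30, 768])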
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


A__ : str = logging.get_logger(__name__)


class UpperCAmelCase ( snake_case_ ):
    def __init__( self : Optional[Any] , *__snake_case : Optional[Any] , **__snake_case : List[str] ) -> None:
        warnings.warn(
            """The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DPTImageProcessor instead.""" , __snake_case , )
        super().__init__(*__snake_case , **__snake_case )
220
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


A__ : int = {'''tokenization_herbert''': ['''HerbertTokenizer''']}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A__ : Dict = ['''HerbertTokenizerFast''']


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    A__ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
220
1
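The DPTFeatureExtractor shim above follows the standard deprecation pattern in this codebase: a thin subclass that warns and defers everything to its replacement. A generic sketch of the pattern, with hypothetical class names:

import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldExtractor(NewProcessor):  # thin shim kept only for backwards compatibility
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldExtractor is deprecated; use NewProcessor instead.", FutureWarning
        )
        super().__init__(*args, **kwargs)

OldExtractor()  # emits a FutureWarning, then behaves exactly like NewProcessor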
from typing import Any

import numpy as np


def _UpperCamelCase ( lowercase__ ):
    return np.array_equal(lowercase__ , matrix.conjugate().T )


def _UpperCamelCase ( lowercase__ , lowercase__ ):
    __SCREAMING_SNAKE_CASE : Union[str, Any] = v.conjugate().T
    __SCREAMING_SNAKE_CASE : Tuple = v_star.dot(lowercase__ )
    assert isinstance(lowercase__ , np.ndarray )
    return (v_star_dot.dot(lowercase__ )) / (v_star.dot(lowercase__ ))


def _UpperCamelCase ( ):
    __SCREAMING_SNAKE_CASE : List[Any] = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
    __SCREAMING_SNAKE_CASE : List[str] = np.array([[1], [2], [3]] )
    assert is_hermitian(lowercase__ ), F'''{a} is not hermitian.'''
    print(rayleigh_quotient(lowercase__ , lowercase__ ) )

    __SCREAMING_SNAKE_CASE : Dict = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(lowercase__ ), F'''{a} is not hermitian.'''
    assert rayleigh_quotient(lowercase__ , lowercase__ ) == float(3 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
9
import os import jsonlines import numpy as np from tqdm import tqdm __snake_case : Any =2_0_4_8 __snake_case : Union[str, Any] =4_0_9_6 __snake_case : Optional[Any] =4_2 __snake_case : Dict =os.environ.pop('PROCESS_TRAIN', 'false') __snake_case : List[str] ={'null': 0, 'short': 1, 'long': 2, 'yes': 3, 'no': 4} def lowerCAmelCase__ ( lowerCamelCase_ : List[Any]): '''simple docstring''' def choose_first(lowerCamelCase_ : List[str] ,lowerCamelCase_ : Any=False): assert isinstance(lowerCamelCase_ ,lowerCamelCase_) if len(lowerCamelCase_) == 1: lowerCAmelCase__ : Optional[int] = answer[0] return {k: [answer[k]] for k in answer} if is_long_answer else answer for a in answer: if is_long_answer: lowerCAmelCase__ : Any = {k: [a[k]] for k in a} if len(a['''start_token''']) > 0: break return a lowerCAmelCase__ : Optional[Any] = {'''id''': example['''id''']} lowerCAmelCase__ : int = example['''annotations'''] lowerCAmelCase__ : str = annotation['''yes_no_answer'''] if 0 in yes_no_answer or 1 in yes_no_answer: lowerCAmelCase__ : Union[str, Any] = ['''yes'''] if 1 in yes_no_answer else ['''no'''] lowerCAmelCase__ : int = [] lowerCAmelCase__ : Union[str, Any] = [] lowerCAmelCase__ : int = ['''<cls>'''] else: lowerCAmelCase__ : Tuple = ['''short'''] lowerCAmelCase__ : int = choose_first(annotation['''short_answers''']) if len(out['''start_token''']) == 0: # answer will be long if short is not available lowerCAmelCase__ : Optional[Any] = ['''long'''] lowerCAmelCase__ : str = choose_first(annotation['''long_answer'''] ,is_long_answer=lowerCamelCase_) lowerCAmelCase__ : Optional[int] = [] answer.update(lowerCamelCase_) # disregard some samples if len(answer['''start_token''']) > 1 or answer["start_token"] == answer["end_token"]: lowerCAmelCase__ : Optional[Any] = True else: lowerCAmelCase__ : Union[str, Any] = False lowerCAmelCase__ : Tuple = ['''start_token''', '''end_token''', '''start_byte''', '''end_byte''', '''text'''] if not all(isinstance(answer[k] ,lowerCamelCase_) for k in cols): raise ValueError('''Issue in ID''' ,example['''id''']) return answer def lowerCAmelCase__ ( lowerCamelCase_ : List[Any] ,lowerCamelCase_ : Union[str, Any]=False): '''simple docstring''' lowerCAmelCase__ : Any = _get_single_answer(lowerCamelCase_) # bytes are of no use del answer["start_byte"] del answer["end_byte"] # handle yes_no answers explicitly if answer["category"][0] in ["yes", "no"]: # category is list with one element lowerCAmelCase__ : List[Any] = example['''document''']['''tokens'''] lowerCAmelCase__ : Any = [] for i in range(len(doc['''token'''])): if not doc["is_html"][i]: context.append(doc['''token'''][i]) return { "context": " ".join(lowerCamelCase_), "answer": { "start_token": -100, # ignore index in cross-entropy "end_token": -100, # ignore index in cross-entropy "category": answer["category"], "span": answer["category"], # extra }, } # later, help in removing all no answers if answer["start_token"] == [-1]: return { "context": "None", "answer": { "start_token": -1, "end_token": -1, "category": "null", "span": "None", # extra }, } # handling normal samples lowerCAmelCase__ : Union[str, Any] = ['''start_token''', '''end_token'''] answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols}) # e.g. 
[10] == 10 lowerCAmelCase__ : List[Any] = example['''document''']['''tokens'''] lowerCAmelCase__ : Optional[Any] = answer['''start_token'''] lowerCAmelCase__ : Union[str, Any] = answer['''end_token'''] lowerCAmelCase__ : int = [] for i in range(len(doc['''token'''])): if not doc["is_html"][i]: context.append(doc['''token'''][i]) else: if answer["start_token"] > i: start_token -= 1 if answer["end_token"] > i: end_token -= 1 lowerCAmelCase__ : List[Any] = ''' '''.join(context[start_token:end_token]) # checking above code if assertion: lowerCAmelCase__ : str = doc['''is_html'''][answer['''start_token'''] : answer['''end_token''']] lowerCAmelCase__ : List[Any] = doc['''token'''][answer['''start_token'''] : answer['''end_token''']] lowerCAmelCase__ : Optional[int] = ''' '''.join([old[i] for i in range(len(lowerCamelCase_)) if not is_html[i]]) if new != old: print('''ID:''' ,example['''id''']) print('''New:''' ,lowerCamelCase_ ,end='''\n''') print('''Old:''' ,lowerCamelCase_ ,end='''\n\n''') return { "context": " ".join(lowerCamelCase_), "answer": { "start_token": start_token, "end_token": end_token - 1, # this makes it inclusive "category": answer["category"], # either long or short "span": new, # extra }, } def lowerCAmelCase__ ( lowerCamelCase_ : int ,lowerCamelCase_ : str ,lowerCamelCase_ : Tuple=2048 ,lowerCamelCase_ : Dict=4096 ,lowerCamelCase_ : Optional[Any]=True): '''simple docstring''' lowerCAmelCase__ : int = get_context_and_ans(lowerCamelCase_ ,assertion=lowerCamelCase_) lowerCAmelCase__ : Union[str, Any] = out['''answer'''] # later, removing these samples if answer["start_token"] == -1: return { "example_id": example["id"], "input_ids": [[-1]], "labels": { "start_token": [-1], "end_token": [-1], "category": ["null"], }, } lowerCAmelCase__ : Union[str, Any] = tokenizer(example['''question''']['''text'''] ,out['''context''']).input_ids lowerCAmelCase__ : List[str] = input_ids.index(tokenizer.sep_token_id) + 1 # return yes/no if answer["category"][0] in ["yes", "no"]: # category is list with one element lowerCAmelCase__ : Dict = [] lowerCAmelCase__ : Dict = [] lowerCAmelCase__ : List[Any] = input_ids[:q_len] lowerCAmelCase__ : List[Any] = range(lowerCamelCase_ ,len(lowerCamelCase_) ,max_length - doc_stride) for i in doc_start_indices: lowerCAmelCase__ : Union[str, Any] = i + max_length - q_len lowerCAmelCase__ : Any = input_ids[i:end_index] inputs.append(q_indices + slice) category.append(answer['''category'''][0]) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": [-100] * len(lowerCamelCase_), "end_token": [-100] * len(lowerCamelCase_), "category": category, }, } lowerCAmelCase__ : Optional[Any] = out['''context'''].split() lowerCAmelCase__ : Union[str, Any] = splitted_context[answer['''end_token''']] lowerCAmelCase__ : Optional[int] = len( tokenizer( ''' '''.join(splitted_context[: answer['''start_token''']]) ,add_special_tokens=lowerCamelCase_ ,).input_ids) lowerCAmelCase__ : Dict = len( tokenizer(''' '''.join(splitted_context[: answer['''end_token''']]) ,add_special_tokens=lowerCamelCase_).input_ids) answer["start_token"] += q_len answer["end_token"] += q_len # fixing end token lowerCAmelCase__ : int = len(tokenizer(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_).input_ids) if num_sub_tokens > 1: answer["end_token"] += num_sub_tokens - 1 lowerCAmelCase__ : Union[str, Any] = input_ids[answer['''start_token'''] : answer['''end_token'''] + 1] # right & left are inclusive lowerCAmelCase__ : List[str] 
= answer['''start_token'''] lowerCAmelCase__ : Union[str, Any] = answer['''end_token'''] if assertion: lowerCAmelCase__ : int = tokenizer.decode(lowerCamelCase_) if answer["span"] != new: print('''ISSUE IN TOKENIZATION''') print('''OLD:''' ,answer['''span''']) print('''NEW:''' ,lowerCamelCase_ ,end='''\n\n''') if len(lowerCamelCase_) <= max_length: return { "example_id": example["id"], "input_ids": [input_ids], "labels": { "start_token": [answer["start_token"]], "end_token": [answer["end_token"]], "category": answer["category"], }, } lowerCAmelCase__ : int = input_ids[:q_len] lowerCAmelCase__ : Optional[Any] = range(lowerCamelCase_ ,len(lowerCamelCase_) ,max_length - doc_stride) lowerCAmelCase__ : Tuple = [] lowerCAmelCase__ : Optional[Any] = [] lowerCAmelCase__ : Union[str, Any] = [] lowerCAmelCase__ : Any = [] # null, yes, no, long, short for i in doc_start_indices: lowerCAmelCase__ : str = i + max_length - q_len lowerCAmelCase__ : List[str] = input_ids[i:end_index] inputs.append(q_indices + slice) assert len(inputs[-1]) <= max_length, "Issue in truncating length" if start_token >= i and end_token <= end_index - 1: lowerCAmelCase__ : int = start_token - i + q_len lowerCAmelCase__ : str = end_token - i + q_len answers_category.append(answer['''category'''][0]) # ["short"] -> "short" else: lowerCAmelCase__ : Tuple = -100 lowerCAmelCase__ : List[str] = -100 answers_category.append('''null''') lowerCAmelCase__ : int = inputs[-1][start_token : end_token + 1] answers_start_token.append(lowerCamelCase_) answers_end_token.append(lowerCamelCase_) if assertion: if new != old and new != [tokenizer.cls_token_id]: print('''ISSUE in strided for ID:''' ,example['''id''']) print('''New:''' ,tokenizer.decode(lowerCamelCase_)) print('''Old:''' ,tokenizer.decode(lowerCamelCase_) ,end='''\n\n''') if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": answers_start_token, "end_token": answers_end_token, "category": answers_category, }, } def lowerCAmelCase__ ( lowerCamelCase_ : Any ,lowerCamelCase_ : List[Any] ,lowerCamelCase_ : int=2048 ,lowerCamelCase_ : Tuple=4096 ,lowerCamelCase_ : Optional[int]=False): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = get_strided_contexts_and_ans( lowerCamelCase_ ,lowerCamelCase_ ,doc_stride=lowerCamelCase_ ,max_length=lowerCamelCase_ ,assertion=lowerCamelCase_ ,) return example def lowerCAmelCase__ ( lowerCamelCase_ : str ,lowerCamelCase_ : int): '''simple docstring''' with jsonlines.open(lowerCamelCase_ ,'''a''') as writer: for example in tqdm(lowerCamelCase_ ,total=len(lowerCamelCase_) ,desc='''Saving samples ... 
'''): lowerCAmelCase__ : Optional[Any] = example['''labels'''] for ids, start, end, cat in zip( example['''input_ids'''] ,labels['''start_token'''] ,labels['''end_token'''] ,labels['''category'''] ,): if start == -1 and end == -1: continue # leave waste samples with no answer if cat == "null" and np.random.rand() < 0.6: continue # removing 50 % samples writer.write( { '''input_ids''': ids, '''start_token''': start, '''end_token''': end, '''category''': CATEGORY_MAPPING[cat], }) if __name__ == "__main__": from datasets import load_dataset from transformers import BigBirdTokenizer __snake_case : Optional[int] =load_dataset('natural_questions') __snake_case : Union[str, Any] =BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base') __snake_case : Tuple =data['train' if PROCESS_TRAIN == 'true' else 'validation'] __snake_case : Optional[int] ={ 'tokenizer': tokenizer, 'doc_stride': DOC_STRIDE, 'max_length': MAX_LENGTH, 'assertion': False, } __snake_case : Dict =data.map(prepare_inputs, fn_kwargs=fn_kwargs) __snake_case : Dict =data.remove_columns(['annotations', 'document', 'id', 'question']) print(data) np.random.seed(SEED) __snake_case : int ='nq-training.jsonl' if PROCESS_TRAIN == 'true' else 'nq-validation.jsonl' save_to_disk(data, file_name=cache_file_name)
129
0
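The code field of the row above implements the Rayleigh quotient R(a, v) = (v* a v) / (v* v) for a Hermitian matrix. A minimal standalone NumPy sketch of the same computation follows; the variable names here are illustrative, not taken from the row itself.

import numpy as np

# For Hermitian `a`, the Rayleigh quotient (v* a v) / (v* v) is always real.
a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])  # Hermitian matrix
v = np.array([[1], [2], [3]])  # any non-zero column vector
r = (v.conjugate().T @ a @ v) / (v.conjugate().T @ v)
print(r.item().real)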
from typing import TYPE_CHECKING

from ....utils import _LazyModule


_import_structure = {'tokenization_tapex': ['TapexTokenizer']}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
355
import math


def check_partition_perfect(positive_integer: int) -> bool:
    '''simple docstring'''
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 1_23_45) -> int:
    '''simple docstring'''
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f'''{solution() = }''')
116
0
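The Project Euler snippet in the row above hinges on one observation: k = 4^t - 2^t means x = 2^t satisfies x^2 - x = k, so x = (1 + sqrt(4k + 1)) / 2, and the partition is perfect exactly when x is a power of two, i.e. log2(x) is an integer. A short sketch of that check, with hand-verified cases:

import math

def check_partition_perfect(k: int) -> bool:
    # x = (1 + sqrt(4k + 1)) / 2 solves x^2 - x = k; perfect iff x is a power of two
    exponent = math.log2(math.sqrt(4 * k + 1) / 2 + 1 / 2)
    return exponent == int(exponent)

assert check_partition_perfect(2)  # 2 = 4^1 - 2^1, so x = 2
assert check_partition_perfect(12)  # 12 = 4^2 - 2^2, so x = 4
assert not check_partition_perfect(6)  # x = 3 is not a power of two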
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    '''simple docstring'''

    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        '''simple docstring'''
        return {self.text_column: "text"}
282
class PrefixSum:
    '''simple docstring'''

    def __init__(self, array: list[int]) -> None:
        '''simple docstring'''
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        '''simple docstring'''
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        '''simple docstring'''
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
282
1
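The prefix-sum class in the row above supports O(1) range sums and a set-based contiguous-subarray-sum test. A minimal sketch of both ideas outside the class; all names here are illustrative:

array = [1, 2, 3, 4]
prefix = [0] * len(array)
prefix[0] = array[0]
for i in range(1, len(array)):
    prefix[i] = prefix[i - 1] + array[i]

assert prefix[3] - prefix[0] == 9  # sum of array[1..3] == 2 + 3 + 4

# subarray-sum test: some contiguous slice sums to `target` iff a prefix
# differing by `target` was already seen (seed with 0 for whole prefixes)
target, seen, found = 7, {0}, False
for s in prefix:
    if s - target in seen:
        found = True
        break
    seen.add(s)
assert found  # 3 + 4 == 7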
'''simple docstring'''
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2_000_000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(F'''{solution() = }''')
350
'''simple docstring''' import os import sys import unittest lowercase : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path lowercase : List[Any] = os.path.join(git_repo_path, "src", "diffusers") class __UpperCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self ): """simple docstring""" _snake_case = find_backend(' if not is_torch_available():' ) self.assertEqual(lowerCAmelCase_ , 'torch' ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") _snake_case = find_backend(' if not (is_torch_available() and is_transformers_available()):' ) self.assertEqual(lowerCAmelCase_ , 'torch_and_transformers' ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") _snake_case = find_backend( ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):' ) self.assertEqual(lowerCAmelCase_ , 'torch_and_transformers_and_onnx' ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch' , lowerCAmelCase_ ) self.assertIn('torch_and_transformers' , lowerCAmelCase_ ) self.assertIn('flax_and_transformers' , lowerCAmelCase_ ) self.assertIn('torch_and_transformers_and_onnx' , lowerCAmelCase_ ) # Likewise, we can't assert on the exact content of a key self.assertIn('UNet2DModel' , objects['torch'] ) self.assertIn('FlaxUNet2DConditionModel' , objects['flax'] ) self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers'] ) self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers'] ) self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy'] ) self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx'] ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = create_dummy_object('CONSTANT' , '\'torch\'' ) self.assertEqual(lowerCAmelCase_ , '\nCONSTANT = None\n' ) _snake_case = create_dummy_object('function' , '\'torch\'' ) self.assertEqual( lowerCAmelCase_ , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' ) _snake_case = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n' _snake_case = create_dummy_object('FakeClass' , '\'torch\'' ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 
["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n' _snake_case = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} ) self.assertEqual(dummy_files['torch'] , lowerCAmelCase_ )
160
0
"""simple docstring""" def __a ( __lowerCamelCase, __lowerCamelCase ): return int(input_a == input_a == 0 ) def __a ( ): print("Truth Table of NOR Gate:" ) print("| Input 1 | Input 2 | Output |" ) print(f"""| 0 | 0 | {nor_gate(0, 0 )} |""" ) print(f"""| 0 | 1 | {nor_gate(0, 1 )} |""" ) print(f"""| 1 | 0 | {nor_gate(1, 0 )} |""" ) print(f"""| 1 | 1 | {nor_gate(1, 1 )} |""" ) if __name__ == "__main__": import doctest doctest.testmod() main()
61
"""simple docstring""" import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class A_ (unittest.TestCase ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_=13 , lowercase_=30 , lowercase_=2 , lowercase_=3 , lowercase_=True , lowercase_=True , lowercase_=32 , lowercase_=5 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=10 , lowercase_=0.02 , ): """simple docstring""" UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : List[str] = batch_size UpperCAmelCase_ : Union[str, Any] = image_size UpperCAmelCase_ : List[str] = patch_size UpperCAmelCase_ : Union[str, Any] = num_channels UpperCAmelCase_ : Optional[int] = is_training UpperCAmelCase_ : Dict = use_labels UpperCAmelCase_ : Any = hidden_size UpperCAmelCase_ : Optional[Any] = num_hidden_layers UpperCAmelCase_ : Optional[Any] = num_attention_heads UpperCAmelCase_ : Dict = intermediate_size UpperCAmelCase_ : Optional[Any] = hidden_act UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob UpperCAmelCase_ : Tuple = attention_probs_dropout_prob UpperCAmelCase_ : Dict = type_sequence_label_size UpperCAmelCase_ : Optional[Any] = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ : Any = (image_size // patch_size) ** 2 UpperCAmelCase_ : List[str] = num_patches + 1 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Dict = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , ) return config, pixel_values def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[str] = FlaxViTModel(config=lowercase_ ) UpperCAmelCase_ : int = model(lowercase_ ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ : Optional[Any] = (self.image_size, self.image_size) UpperCAmelCase_ : List[Any] = (self.patch_size, self.patch_size) UpperCAmelCase_ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Tuple = self.type_sequence_label_size UpperCAmelCase_ : Tuple = FlaxViTForImageClassification(config=lowercase_ ) UpperCAmelCase_ : str = model(lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase_ : Any = 1 UpperCAmelCase_ : Optional[int] = FlaxViTForImageClassification(lowercase_ ) UpperCAmelCase_ : List[Any] = 
floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : List[Any] = model(lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Tuple = config_and_inputs UpperCAmelCase_ : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = FlaxViTModelTester(self ) UpperCAmelCase_ : Dict = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Optional[Any] = model_class(lowercase_ ) UpperCAmelCase_ : Optional[int] = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : List[str] = [*signature.parameters.keys()] UpperCAmelCase_ : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase_ : Union[str, Any] = self._prepare_for_class(lowercase_ , lowercase_ ) UpperCAmelCase_ : Tuple = model_class(lowercase_ ) @jax.jit def model_jitted(lowercase_ , **lowercase_ ): return model(pixel_values=lowercase_ , **lowercase_ ) with self.subTest("JIT Enabled" ): UpperCAmelCase_ : Union[str, Any] = model_jitted(**lowercase_ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): UpperCAmelCase_ : Tuple = model_jitted(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) ) for jitted_output, output in zip(lowercase_ , lowercase_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_class_name in self.all_model_classes: UpperCAmelCase_ : Union[str, Any] = model_class_name.from_pretrained("google/vit-base-patch16-224" ) UpperCAmelCase_ : List[str] = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(lowercase_ )
61
1
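The NOR-gate row above reduces the gate to a chained comparison: the output is 1 only when both inputs are 0. A quick sketch cross-checking that encoding against Python's boolean operators:

def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)

for x in (0, 1):
    for y in (0, 1):
        assert nor_gate(x, y) == int(not (x or y))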
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        """simple docstring"""
        tokenizer = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''')

        text = tokenizer.encode('''sequence builders''', add_special_tokens=False)
        text_a = tokenizer.encode('''multi-sequence build''', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
237
'''simple docstring''' import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class a__ ( UpperCAmelCase__ ): def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" __lowerCamelCase = tempfile.mkdtemp() __lowerCamelCase = 5 # Realm tok __lowerCamelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''test''', '''question''', '''this''', '''is''', '''the''', '''first''', '''second''', '''third''', '''fourth''', '''fifth''', '''record''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __lowerCamelCase = os.path.join(self.tmpdirname , '''realm_tokenizer''' ) os.makedirs(a , exist_ok=a ) __lowerCamelCase = os.path.join(a , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __lowerCamelCase = os.path.join(self.tmpdirname , '''realm_block_records''' ) os.makedirs(a , exist_ok=a ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" __lowerCamelCase = RealmConfig(num_block_records=self.num_block_records ) return config def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" __lowerCamelCase = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''question''': ['''foo''', '''bar'''], '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']], } ) return dataset def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" __lowerCamelCase = np.array( [ b'''This is the first record''', b'''This is the second record''', b'''This is the third record''', b'''This is the fourth record''', b'''This is the fifth record''', b'''This is a longer longer longer record''', ] , dtype=a , ) return block_records def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" __lowerCamelCase = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" __lowerCamelCase = self.get_config() __lowerCamelCase = self.get_dummy_retriever() __lowerCamelCase = retriever.tokenizer __lowerCamelCase = np.array([0, 3] , dtype='''long''' ) __lowerCamelCase = tokenizer(['''Test question'''] ).input_ids __lowerCamelCase = tokenizer( ['''the fourth'''] , add_special_tokens=a , return_token_type_ids=a , return_attention_mask=a , ).input_ids __lowerCamelCase = config.reader_seq_len __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = retriever( a , a , answer_ids=a , max_length=a , return_tensors='''np''' ) self.assertEqual(len(a ) , 2 ) self.assertEqual(len(a ) , 2 ) self.assertEqual(len(a ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) 
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" __lowerCamelCase = self.get_config() __lowerCamelCase = self.get_dummy_retriever() __lowerCamelCase = retriever.tokenizer __lowerCamelCase = np.array([0, 3, 5] , dtype='''long''' ) __lowerCamelCase = tokenizer(['''Test question'''] ).input_ids __lowerCamelCase = tokenizer( ['''the fourth''', '''longer longer'''] , add_special_tokens=a , return_token_type_ids=a , return_attention_mask=a , ).input_ids __lowerCamelCase = config.reader_seq_len __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = retriever( a , a , answer_ids=a , max_length=a , return_tensors='''np''' ) self.assertEqual([False, True, True] , a ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , a ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , a ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" __lowerCamelCase = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) ) # Test local path __lowerCamelCase = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) ) self.assertEqual(retriever.block_records[0] , b'''This is the first record''' ) # Test mocked remote path with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download: __lowerCamelCase = os.path.join( os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME ) __lowerCamelCase = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' ) self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
237
1
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Optional[int] ) -> Any: UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : str = -1 UpperCAmelCase_ : Dict = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ) UpperCAmelCase_ : Any = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ ) model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer UpperCAmelCase_ : Optional[int] = cs.out[:-1] self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: Dict ) -> Optional[Any]: UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = -1 UpperCAmelCase_ : List[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ) UpperCAmelCase_ : Dict = tokenizer.decode(greedy_ids[0] ) UpperCAmelCase_ : str = TextIteratorStreamer(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} UpperCAmelCase_ : str = Thread(target=model.generate ,kwargs=lowerCamelCase_ ) thread.start() UpperCAmelCase_ : int = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: List[Any] ) -> Dict: UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = -1 UpperCAmelCase_ : Tuple = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : Dict = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ) UpperCAmelCase_ : str = greedy_ids[:, input_ids.shape[1] :] UpperCAmelCase_ : Dict = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ ,skip_prompt=lowerCamelCase_ ) model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer UpperCAmelCase_ : List[str] = cs.out[:-1] self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) def A__ ( self: str ) -> str: # Tests that we can pass 
`decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained("""distilgpt2""" ) UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : Any = -1 UpperCAmelCase_ : Union[str, Any] = torch.ones((1, 5) ,device=lowerCamelCase_ ).long() * model.config.bos_token_id with CaptureStdout() as cs: UpperCAmelCase_ : Union[str, Any] = TextStreamer(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ ) model.generate(lowerCamelCase_ ,max_new_tokens=1 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token UpperCAmelCase_ : List[str] = cs.out[:-1] # Remove the final "\n" UpperCAmelCase_ : Dict = tokenizer(lowerCamelCase_ ,return_tensors="""pt""" ) self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) ) def A__ ( self: List[str] ) -> Any: UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCAmelCase_ : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ ) UpperCAmelCase_ : List[str] = -1 UpperCAmelCase_ : Optional[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = TextIteratorStreamer(lowerCamelCase_ ,timeout=0.0_0_1 ) UpperCAmelCase_ : Any = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} UpperCAmelCase_ : Dict = Thread(target=model.generate ,kwargs=lowerCamelCase_ ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(lowerCamelCase_ ): UpperCAmelCase_ : Union[str, Any] = """""" for new_text in streamer: streamer_text += new_text
345
import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class _snake_case ( __snake_case , unittest.TestCase ): '''simple docstring''' A__ : Optional[Any] = CTRLTokenizer A__ : Optional[Any] = False A__ : str = False def A__ ( self: Optional[int] ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase_ : Dict = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""] UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) ) UpperCAmelCase_ : List[Any] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""] UpperCAmelCase_ : Optional[Any] = {"""unk_token""": """<unk>"""} UpperCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(lowerCamelCase_ ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(lowerCamelCase_ ) ) def A__ ( self: Optional[int] ,**lowerCamelCase_: Any ) -> str: kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase_ ) def A__ ( self: int ,lowerCamelCase_: int ) -> str: UpperCAmelCase_ : List[str] = """adapt react readapt apt""" UpperCAmelCase_ : List[Any] = """adapt react readapt apt""" return input_text, output_text def A__ ( self: Union[str, Any] ) -> Optional[int]: UpperCAmelCase_ : Union[str, Any] = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) UpperCAmelCase_ : List[Any] = """adapt react readapt apt""" UpperCAmelCase_ : Optional[int] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split() UpperCAmelCase_ : Tuple = tokenizer.tokenize(lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokens + [tokenizer.unk_token] UpperCAmelCase_ : List[str] = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,lowerCamelCase_ )
345
1
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch _UpperCAmelCase : List[str] = logging.get_logger(__name__) class lowerCAmelCase : def __init__( self : str , UpperCAmelCase : str = None , UpperCAmelCase : uuid.UUID = None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : int=None ) -> Optional[int]: if not conversation_id: lowerCamelCase__ : List[str] = uuid.uuida() if past_user_inputs is None: lowerCamelCase__ : Union[str, Any] = [] if generated_responses is None: lowerCamelCase__ : Optional[Any] = [] lowerCamelCase__ : uuid.UUID = conversation_id lowerCamelCase__ : List[str] = past_user_inputs lowerCamelCase__ : List[str] = generated_responses lowerCamelCase__ : Optional[str] = text def __eq__( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> Optional[int]: if not isinstance(UpperCAmelCase , UpperCAmelCase ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def A_ ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : bool = False ) -> List[str]: if self.new_user_input: if overwrite: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """ F"""with: \"{text}\".""" ) lowerCamelCase__ : List[Any] = text else: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """ F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" ) else: lowerCamelCase__ : str = text def A_ ( self : Tuple ) -> Union[str, Any]: if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) lowerCamelCase__ : Dict = None def A_ ( self : List[str] , UpperCAmelCase : str ) -> Dict: self.generated_responses.append(UpperCAmelCase ) def A_ ( self : Dict ) -> Tuple: for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self : Any ) -> Tuple: lowerCamelCase__ : List[Any] = F"""Conversation id: {self.uuid} \n""" for is_user, text in self.iter_texts(): lowerCamelCase__ : Any = 'user' if is_user else 'bot' output += F"""{name} >> {text} \n""" return output @add_end_docstrings( __UpperCamelCase, r""" min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. 
""", ) class lowerCAmelCase ( __UpperCamelCase ): def __init__( self : Tuple , *UpperCAmelCase : int , **UpperCAmelCase : Any ) -> Union[str, Any]: super().__init__(*UpperCAmelCase , **UpperCAmelCase ) if self.tokenizer.pad_token_id is None: lowerCamelCase__ : List[str] = self.tokenizer.eos_token def A_ ( self : Any , UpperCAmelCase : List[str]=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : Optional[Any] ) -> int: lowerCamelCase__ : Optional[Any] = {} lowerCamelCase__ : Union[str, Any] = {} lowerCamelCase__ : Any = {} if min_length_for_response is not None: lowerCamelCase__ : str = min_length_for_response if minimum_tokens is not None: lowerCamelCase__ : Tuple = minimum_tokens if "max_length" in generate_kwargs: lowerCamelCase__ : str = generate_kwargs['max_length'] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: lowerCamelCase__ : int = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(UpperCAmelCase ) return preprocess_params, forward_params, postprocess_params def __call__( self : Tuple , UpperCAmelCase : Union[Conversation, List[Conversation]] , UpperCAmelCase : Union[str, Any]=0 , **UpperCAmelCase : int ) -> Tuple: lowerCamelCase__ : List[str] = super().__call__(UpperCAmelCase , num_workers=UpperCAmelCase , **UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) and len(UpperCAmelCase ) == 1: return outputs[0] return outputs def A_ ( self : List[Any] , UpperCAmelCase : Conversation , UpperCAmelCase : Dict=32 ) -> Dict[str, Any]: if not isinstance(UpperCAmelCase , UpperCAmelCase ): raise ValueError('ConversationalPipeline, expects Conversation as inputs' ) if conversation.new_user_input is None: raise ValueError( F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. 
""" 'Add user inputs with the conversation\'s `add_user_input` method' ) if hasattr(self.tokenizer , '_build_conversation_input_ids' ): lowerCamelCase__ : Any = self.tokenizer._build_conversation_input_ids(UpperCAmelCase ) else: # If the tokenizer cannot handle conversations, we default to only the old version lowerCamelCase__ : Union[str, Any] = self._legacy_parse_and_tokenize(UpperCAmelCase ) if self.framework == "pt": lowerCamelCase__ : List[str] = torch.LongTensor([input_ids] ) elif self.framework == "tf": lowerCamelCase__ : List[Any] = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def A_ ( self : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : int=10 , **UpperCAmelCase : List[str] ) -> Any: lowerCamelCase__ : List[str] = generate_kwargs.get('max_length' , self.model.config.max_length ) lowerCamelCase__ : Optional[int] = model_inputs['input_ids'].shape[1] if max_length - minimum_tokens < n: logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" ) lowerCamelCase__ : List[Any] = max_length - minimum_tokens lowerCamelCase__ : List[Any] = model_inputs['input_ids'][:, -trim:] if "attention_mask" in model_inputs: lowerCamelCase__ : Union[str, Any] = model_inputs['attention_mask'][:, -trim:] lowerCamelCase__ : str = model_inputs.pop('conversation' ) lowerCamelCase__ : str = max_length lowerCamelCase__ : int = self.model.generate(**UpperCAmelCase , **UpperCAmelCase ) if self.model.config.is_encoder_decoder: lowerCamelCase__ : Dict = 1 else: lowerCamelCase__ : Any = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def A_ ( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : Any=True ) -> List[Any]: lowerCamelCase__ : Union[str, Any] = model_outputs['output_ids'] lowerCamelCase__ : Union[str, Any] = self.tokenizer.decode( output_ids[0] , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase , ) lowerCamelCase__ : Tuple = model_outputs['conversation'] conversation.mark_processed() conversation.append_response(UpperCAmelCase ) return conversation def A_ ( self : Optional[Any] , UpperCAmelCase : Conversation ) -> Dict: lowerCamelCase__ : Union[str, Any] = self.tokenizer.eos_token_id lowerCamelCase__ : Optional[int] = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) ) if len(UpperCAmelCase ) > self.tokenizer.model_max_length: lowerCamelCase__ : Dict = input_ids[-self.tokenizer.model_max_length :] return input_ids
45
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase : List[str] = logging.get_logger() @dataclass class lowerCAmelCase : UpperCAmelCase__ = 42 UpperCAmelCase__ = field(default_factory=__UpperCamelCase ) UpperCAmelCase__ = field(default_factory=__UpperCamelCase ) def A_ ( self : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tensor , UpperCAmelCase : Tensor ) -> Any: lowerCamelCase__ : List[str] = len(list(m.modules() ) ) == 1 or isinstance(UpperCAmelCase , nn.Convad ) or isinstance(UpperCAmelCase , nn.BatchNormad ) if has_not_submodules: self.traced.append(UpperCAmelCase ) def __call__( self : Any , UpperCAmelCase : Tensor ) -> Dict: for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(UpperCAmelCase ) [x.remove() for x in self.handles] return self @property def A_ ( self : List[str] ) -> int: # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda UpperCAmelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowerCAmelCase : UpperCAmelCase__ = 42 UpperCAmelCase__ = 42 UpperCAmelCase__ = 0 UpperCAmelCase__ = field(default_factory=__UpperCamelCase ) UpperCAmelCase__ = field(default_factory=__UpperCamelCase ) def __call__( self : Any , UpperCAmelCase : Tensor ) -> int: lowerCamelCase__ : Union[str, Any] = Tracker(self.dest )(UpperCAmelCase ).parametrized lowerCamelCase__ : List[Any] = Tracker(self.src )(UpperCAmelCase ).parametrized lowerCamelCase__ : Any = list(filter(lambda UpperCAmelCase : type(UpperCAmelCase ) not in self.src_skip , UpperCAmelCase ) ) lowerCamelCase__ : int = list(filter(lambda UpperCAmelCase : type(UpperCAmelCase ) not in self.dest_skip , UpperCAmelCase ) ) if len(UpperCAmelCase ) != len(UpperCAmelCase ): raise Exception( F"""Numbers of operations are different. Source module has {len(UpperCAmelCase )} operations while""" F""" destination module has {len(UpperCAmelCase )}.""" ) for dest_m, src_m in zip(UpperCAmelCase , UpperCAmelCase ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F"""Transfered from={src_m} to={dest_m}""" ) def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True ) -> Any: print(F"""Converting {name}...""" ) with torch.no_grad(): lowerCamelCase__ : int = timm.create_model(_UpperCAmelCase , pretrained=_UpperCAmelCase ).eval() lowerCamelCase__ : Union[str, Any] = ResNetForImageClassification(_UpperCAmelCase ).eval() lowerCamelCase__ : str = ModuleTransfer(src=_UpperCAmelCase , dest=_UpperCAmelCase ) lowerCamelCase__ : Optional[int] = torch.randn((1, 3, 224, 224) ) module_transfer(_UpperCAmelCase ) assert torch.allclose(from_model(_UpperCAmelCase ) , our_model(_UpperCAmelCase ).logits ), "The model logits don't match the original one." 
lowerCamelCase__ : Union[str, Any] = F"""resnet{"-".join(name.split("resnet" ) )}""" print(_UpperCAmelCase ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=_UpperCAmelCase , ) # we can use the convnext one lowerCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=_UpperCAmelCase , ) print(F"""Pushed {checkpoint_name}""" ) def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = True ) -> List[str]: lowerCamelCase__ : Dict = 'imagenet-1k-id2label.json' lowerCamelCase__ : Optional[int] = 1000 lowerCamelCase__ : int = (1, num_labels) lowerCamelCase__ : Any = 'huggingface/label-files' lowerCamelCase__ : str = num_labels lowerCamelCase__ : Any = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='dataset' ) , 'r' ) ) lowerCamelCase__ : Any = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} lowerCamelCase__ : str = idalabel lowerCamelCase__ : Any = {v: k for k, v in idalabel.items()} lowerCamelCase__ : Tuple = partial(_UpperCAmelCase , num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase ) lowerCamelCase__ : Optional[Any] = { 'resnet18': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet26': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet34': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet50': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet101': ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet152': ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), } if model_name: convert_weight_and_push(_UpperCAmelCase , names_to_config[model_name] , _UpperCAmelCase , _UpperCAmelCase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return config, expected_shape if __name__ == "__main__": _UpperCAmelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported resnet* architecture,""" """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) _UpperCAmelCase : str = parser.parse_args() _UpperCAmelCase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
45
1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : List[Any] = logging.get_logger(__name__) A : List[str] = { "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json", # See all LeViT models at https://huggingface.co/models?filter=levit } class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = '''levit''' def __init__( self : Dict , __magic_name__ : Optional[Any]=224 , __magic_name__ : Optional[Any]=3 , __magic_name__ : Optional[Any]=3 , __magic_name__ : str=2 , __magic_name__ : Tuple=1 , __magic_name__ : List[Any]=16 , __magic_name__ : Optional[Any]=[128, 256, 384] , __magic_name__ : Dict=[4, 8, 12] , __magic_name__ : int=[4, 4, 4] , __magic_name__ : List[Any]=[16, 16, 16] , __magic_name__ : List[Any]=0 , __magic_name__ : str=[2, 2, 2] , __magic_name__ : Dict=[2, 2, 2] , __magic_name__ : Optional[Any]=0.02 , **__magic_name__ : str , ) -> int: super().__init__(**__magic_name__ ) SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = kernel_size SCREAMING_SNAKE_CASE_ = stride SCREAMING_SNAKE_CASE_ = padding SCREAMING_SNAKE_CASE_ = hidden_sizes SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = depths SCREAMING_SNAKE_CASE_ = key_dim SCREAMING_SNAKE_CASE_ = drop_path_rate SCREAMING_SNAKE_CASE_ = patch_size SCREAMING_SNAKE_CASE_ = attention_ratio SCREAMING_SNAKE_CASE_ = mlp_ratio SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = [ ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" lowerCamelCase__ = version.parse('''1.11''' ) @property def __A ( self : Dict ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def __A ( self : Union[str, Any] ) -> float: return 1e-4
118
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
118
1
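The Gaussian-filter sample above builds its kernel on a centered coordinate grid and applies it window by window. As a quick illustration of the same idea, here is a minimal, self-contained sketch run on a synthetic 8x8 gradient instead of an image file; the array, sizes, and names are made up for the example and are not part of the sample itself.

import numpy as np


def gaussian_kernel(k_size: int, sigma: float) -> np.ndarray:
    # centered coordinate grid, e.g. rows/cols -1..1 for k_size == 3
    center = k_size // 2
    x, y = np.mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    # same normalisation as the sample: 1 / (2*pi*sigma) * exp(-(x^2 + y^2) / (2*sigma^2))
    return np.exp(-(np.square(x) + np.square(y)) / (2 * np.square(sigma))) / (2 * np.pi * sigma)


# synthetic 8x8 horizontal gradient standing in for a real grayscale image
image = np.tile(np.arange(8, dtype=float), (8, 1))
kernel = gaussian_kernel(3, sigma=1.0)

# "valid" filtering: slide the 3x3 window over every position that fits
out = np.empty((6, 6))
for i in range(6):
    for j in range(6):
        out[i, j] = (image[i : i + 3, j : j + 3] * kernel).sum()
print(out.round(2))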
'''simple docstring'''

import datasets


_CITATION = """\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
"""

_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""

_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
    predictions: Predicted labels.
    references: Ground truth labels.
Returns:
    'accuracy': accuracy
Examples:

    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> xnli_metric = datasets.load_metric("xnli")
    >>> results = xnli_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}
"""


def simple_accuracy(preds, labels):
    '''simple docstring'''
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        """simple docstring"""
        return {"accuracy": simple_accuracy(predictions, references)}
311
'''simple docstring''' import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' return 1.0 / (1.0 + np.exp(-_outputs )) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[int] = np.max(_outputs , axis=-1 , keepdims=snake_case__ ) A : Any = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=snake_case__ ) class A ( __snake_case ): __magic_name__ = '''sigmoid''' __magic_name__ = '''softmax''' __magic_name__ = '''none''' @add_end_docstrings( __snake_case , R''' return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. ''' , ) class A ( __snake_case ): __magic_name__ = False __magic_name__ = ClassificationFunction.NONE def __init__( self , **SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="" , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : Optional[Any] = tokenizer_kwargs A : int = {} if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None: A : int = self.model.config.return_all_scores if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or top_k is None: A : Union[str, Any] = top_k A : Dict = False elif return_all_scores is not None: warnings.warn( '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of''' ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , SCREAMING_SNAKE_CASE , ) if return_all_scores: A : Optional[int] = None else: A : Dict = 1 if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : Dict = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: A : int = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : str = super().__call__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
A : Any = '''top_k''' not in kwargs if isinstance(args[0] , SCREAMING_SNAKE_CASE ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict[str, GenericTensor]: """simple docstring""" A : List[Any] = self.framework if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): return self.tokenizer(**SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) == 1 and isinstance(inputs[0] , SCREAMING_SNAKE_CASE ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a''' ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' ) return self.tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" return self.model(**SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=True ) -> List[str]: """simple docstring""" if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: A : Optional[int] = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: A : Any = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None: A : Optional[int] = self.model.config.function_to_apply else: A : Optional[int] = ClassificationFunction.NONE A : Any = model_outputs['''logits'''][0] A : List[Any] = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: A : int = sigmoid(SCREAMING_SNAKE_CASE ) elif function_to_apply == ClassificationFunction.SOFTMAX: A : Any = softmax(SCREAMING_SNAKE_CASE ) elif function_to_apply == ClassificationFunction.NONE: A : int = outputs else: raise ValueError(F'Unrecognized `function_to_apply` argument: {function_to_apply}' ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} A : int = [ {'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(SCREAMING_SNAKE_CASE ) ] if not _legacy: dict_scores.sort(key=lambda SCREAMING_SNAKE_CASE : x["score"] , reverse=SCREAMING_SNAKE_CASE ) if top_k is not None: A : Union[str, Any] = dict_scores[:top_k] return dict_scores
311
1
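The text-classification pipeline in the record above post-processes logits with either a sigmoid or a numerically stable softmax before ranking labels. A small self-contained sketch of those two functions and the top-k ranking step; the logits and the id2label map are made up for illustration.

import numpy as np


def sigmoid(outputs: np.ndarray) -> np.ndarray:
    return 1.0 / (1.0 + np.exp(-outputs))


def softmax(outputs: np.ndarray) -> np.ndarray:
    # subtract the row max first so exp() cannot overflow
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


logits = np.array([2.0, 0.5, -1.0])  # hypothetical model output
scores = softmax(logits)
id2label = {0: "positive", 1: "neutral", 2: "negative"}  # made-up label map
ranked = sorted(
    ({"label": id2label[i], "score": float(s)} for i, s in enumerate(scores)),
    key=lambda d: d["score"],
    reverse=True,
)
print(ranked[:2])  # top_k = 2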
'''simple docstring''' import math from typing import Callable, List, Optional, Union import numpy as np import PIL import torch from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__=[] ): '''simple docstring''' A : Union[str, Any] = size[0] - overlap_pixels * 2 A : str = size[1] - overlap_pixels * 2 for letter in ["l", "r"]: if letter in remove_borders: size_x += overlap_pixels for letter in ["t", "b"]: if letter in remove_borders: size_y += overlap_pixels A : str = np.ones((size_y, size_x) , dtype=np.uinta ) * 255 A : Dict = np.pad(snake_case__ , mode='''linear_ramp''' , pad_width=snake_case__ , end_values=0 ) if "l" in remove_borders: A : Any = mask[:, overlap_pixels : mask.shape[1]] if "r" in remove_borders: A : Any = mask[:, 0 : mask.shape[1] - overlap_pixels] if "t" in remove_borders: A : Any = mask[overlap_pixels : mask.shape[0], :] if "b" in remove_borders: A : Union[str, Any] = mask[0 : mask.shape[0] - overlap_pixels, :] return mask def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' return max(snake_case__ , min(snake_case__ , snake_case__ ) ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' return ( clamp(rect[0] , min[0] , max[0] ), clamp(rect[1] , min[1] , max[1] ), clamp(rect[2] , min[0] , max[0] ), clamp(rect[3] , min[1] , max[1] ), ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : List[Any] = list(snake_case__ ) rect[0] -= overlap rect[1] -= overlap rect[2] += overlap rect[3] += overlap A : str = clamp_rect(snake_case__ , [0, 0] , [image_size[0], image_size[1]] ) return rect def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : int = Image.new('''RGB''' , (tile.size[0] + original_slice, tile.size[1]) ) result.paste( original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop( (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , ) result.paste(snake_case__ , (original_slice, 0) ) return result def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = (original_image_slice * 4, 0, tile.size[0], tile.size[1]) A : Union[str, Any] = tile.crop(snake_case__ ) return tile def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Union[str, Any] = n % d return n - divisor class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 350 , ) -> List[Any]: """simple docstring""" super().__init__( vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , unet=SCREAMING_SNAKE_CASE , low_res_scheduler=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , max_noise_level=SCREAMING_SNAKE_CASE , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) A : List[Any] = ( 
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ), min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ), min(image.size[0] , (x + 1) * tile_size ), min(image.size[1] , (y + 1) * tile_size ), ) A : List[Any] = add_overlap_rect(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , image.size ) A : Dict = image.crop(SCREAMING_SNAKE_CASE ) A : Tuple = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0] A : Any = translated_slice_x - (original_image_slice / 2) A : Optional[Any] = max(0 , SCREAMING_SNAKE_CASE ) A : List[str] = squeeze_tile(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[str] = to_input.size A : Optional[int] = to_input.resize((tile_size, tile_size) , Image.BICUBIC ) A : str = super(SCREAMING_SNAKE_CASE , self ).__call__(image=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).images[0] A : str = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC ) A : int = unsqueeze_tile(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[Any] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC ) A : Optional[int] = [] if x == 0: remove_borders.append('''l''' ) elif crop_rect[2] == image.size[0]: remove_borders.append('''r''' ) if y == 0: remove_borders.append('''t''' ) elif crop_rect[3] == image.size[1]: remove_borders.append('''b''' ) A : Optional[Any] = Image.fromarray( make_transparency_mask( (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=SCREAMING_SNAKE_CASE ) , mode='''L''' , ) final_image.paste( SCREAMING_SNAKE_CASE , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , SCREAMING_SNAKE_CASE ) @torch.no_grad() def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 75 , SCREAMING_SNAKE_CASE = 9.0 , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 128 , SCREAMING_SNAKE_CASE = 32 , SCREAMING_SNAKE_CASE = 32 , ) -> Dict: """simple docstring""" A : str = Image.new('''RGB''' , (image.size[0] * 4, image.size[1] * 4) ) A : Tuple = math.ceil(image.size[0] / tile_size ) A : List[Any] = math.ceil(image.size[1] / tile_size ) A : Optional[int] = tcx * tcy A : int = 0 for y in range(SCREAMING_SNAKE_CASE ): for x in range(SCREAMING_SNAKE_CASE ): self._process_tile( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , prompt=SCREAMING_SNAKE_CASE , num_inference_steps=SCREAMING_SNAKE_CASE , guidance_scale=SCREAMING_SNAKE_CASE , noise_level=SCREAMING_SNAKE_CASE , negative_prompt=SCREAMING_SNAKE_CASE , num_images_per_prompt=SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , latents=SCREAMING_SNAKE_CASE , ) current_count += 1 if callback is not None: callback({'''progress''': current_count / total_tile_count, '''image''': final_image} ) return final_image def lowerCAmelCase_ ( ): '''simple docstring''' A : Dict = '''stabilityai/stable-diffusion-x4-upscaler''' A : int = StableDiffusionTiledUpscalePipeline.from_pretrained(snake_case__ , revision='''fp16''' , torch_dtype=torch.floataa ) A : Dict = pipe.to('''cuda''' ) A : Tuple = Image.open('''../../docs/source/imgs/diffusers_library.jpg''' ) def 
callback(snake_case__ ): print(F'progress: {obj["progress"]:.4f}' ) obj["image"].save('''diffusers_library_progress.jpg''' ) A : Optional[int] = pipe(image=snake_case__ , prompt='''Black font, white background, vector''' , noise_level=40 , callback=snake_case__ ) final_image.save('''diffusers_library.jpg''' ) if __name__ == "__main__": main()
3
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging lowercase : Dict = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: A : Union[str, Any] = os.path.abspath(snake_case__ ) logger.info(F'Loading PyTorch weights from {pt_path}' ) A : Any = torch.load(snake_case__ , map_location='''cpu''' ) logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' ) A : List[str] = convert_pytorch_state_dict_to_flax(snake_case__ , snake_case__ ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files A : Any = convert_pytorch_sharded_state_dict_to_flax(snake_case__ , snake_case__ ) return flax_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' def is_key_or_prefix_key_in_dict(snake_case__ ) -> bool: return len(set(snake_case__ ) & {key, (model_prefix,) + key} ) > 0 # layer norm A : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean A : Tuple = pt_tuple_key[:-1] + ('''mean''',) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var A : Dict = pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # embedding A : Any = pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # conv layer A : Optional[Any] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ): A : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer A : Optional[int] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ): A : str = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight A : Dict = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias A : List[Any] = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 A : Dict = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): A : List[Any] = pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): A : List[str] = pt_tuple_key[-2] + '''_v''' if name is not None: A : int = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return 
pt_tuple_key, pt_tensor def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = {k: v.numpy() for k, v in pt_state_dict.items()} A : int = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: A : List[str] = flax_model.params['''params'''] else: A : Dict = flax_model.params A : List[Any] = flatten_dict(snake_case__ ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : List[str] = flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(snake_case__ ) A : int = {} A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : int = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : str = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : Union[str, Any] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Any = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Dict = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : Any = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: A : Tuple = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : List[str] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch # Load the index A : Union[str, Any] = {} for shard_file in shard_filenames: # load using msgpack utils A : List[str] = torch.load(snake_case__ ) A : int = {k: v.numpy() for k, v in pt_state_dict.items()} A : Tuple = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : Optional[int] = flax_model.params['''params'''] A : List[Any] = flatten_dict(snake_case__ ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: A : Dict = flax_model.params A : Tuple = flatten_dict(snake_case__ ) A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : List[str] = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : int = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : List[str] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Union[str, Any] = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Any = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : int = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue if "var" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = os.path.abspath(snake_case__ ) logger.info(F'Loading Flax weights from {flax_checkpoint_path}' ) # import correct flax class A : List[str] = getattr(snake_case__ , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(snake_case__ , '''rb''' ) as state_f: try: A : int = from_bytes(snake_case__ , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights A : List[str] = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. 
Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) A : Optional[Any] = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) A : Union[str, Any] = flatten_dict(snake_case__ ) A : List[Any] = pt_model.state_dict() A : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) A : Tuple = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys A : int = [] A : Any = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): A : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix A : int = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: A : List[str] = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: A : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict: # conv layer A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) A : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict: # linear layer A : Tuple = flax_key_tuple[:-1] + ('''weight''',) A : Tuple = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: A : List[Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: A : Union[str, Any] = '''.'''.join(snake_case__ ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. A : int = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: A : Optional[int] = key.split('''.''' ) A : Dict = None if key_components[-3::2] == ["parametrizations", "original0"]: A : List[str] = key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: A : List[Any] = key_components[-2] + '''_v''' if name is not None: A : str = key_components[:-3] + [name] A : Optional[Any] = '''.'''.join(snake_case__ ) A : Optional[Any] = key if flax_key in special_pt_names: A : Optional[Any] = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) else: # add weight to pytorch dict A : Dict = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor A : Dict = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list A : List[Any] = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' ) if len(snake_case__ ) > 0: logger.warning( F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ''' use it for predictions and inference.''' ) else: logger.warning( F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n' '''If your task is similar to the task the model of the checkpoint was trained on, ''' F'you can already use {pt_model.__class__.__name__} for predictions without further training.' ) return pt_model
3
1
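The PyTorch-to-Flax converters in the record above hinge on two renaming rules: a linear layer's "weight" becomes a transposed "kernel", and a 4-D conv weight is permuted from (out, in, kh, kw) to (kh, kw, in, out). A toy sketch of just those two rules, with made-up keys and zero tensors standing in for a real checkpoint.

import numpy as np

# hypothetical PyTorch-style state dict: linear weights are (out, in),
# conv weights are (out_ch, in_ch, kh, kw)
pt_state = {
    "dense.weight": np.zeros((8, 4)),
    "conv.weight": np.zeros((16, 3, 3, 3)),
}

flax_state = {}
for key, tensor in pt_state.items():
    parts = tuple(key.split("."))
    if parts[-1] == "weight" and tensor.ndim == 4:
        # conv: Flax expects (kh, kw, in_ch, out_ch) and calls the parameter "kernel"
        flax_state[parts[:-1] + ("kernel",)] = tensor.transpose(2, 3, 1, 0)
    elif parts[-1] == "weight":
        # linear: Flax stores the transpose, also under "kernel"
        flax_state[parts[:-1] + ("kernel",)] = tensor.T
    else:
        flax_state[parts] = tensor

for k, v in flax_state.items():
    print(k, v.shape)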
'''simple docstring''' from __future__ import annotations import math import random from typing import Any class A_ : '''simple docstring''' def __init__( self : Tuple ) -> None: UpperCAmelCase : list[Any] = [] UpperCAmelCase : int = 0 UpperCAmelCase : int = 0 def UpperCAmelCase_ ( self : List[Any] ) -> bool: return self.head == self.tail def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Any ) -> None: self.data.append(lowercase_ ) UpperCAmelCase : Dict = self.tail + 1 def UpperCAmelCase_ ( self : Optional[Any] ) -> Any: UpperCAmelCase : Optional[int] = self.data[self.head] UpperCAmelCase : List[Any] = self.head + 1 return ret def UpperCAmelCase_ ( self : Union[str, Any] ) -> int: return self.tail - self.head def UpperCAmelCase_ ( self : Tuple ) -> None: print(self.data ) print('**************' ) print(self.data[self.head : self.tail] ) class A_ : '''simple docstring''' def __init__( self : Union[str, Any] , lowercase_ : Any ) -> None: UpperCAmelCase : Optional[int] = data UpperCAmelCase : MyNode | None = None UpperCAmelCase : MyNode | None = None UpperCAmelCase : int = 1 def UpperCAmelCase_ ( self : List[Any] ) -> Any: return self.data def UpperCAmelCase_ ( self : Dict ) -> MyNode | None: return self.left def UpperCAmelCase_ ( self : Any ) -> MyNode | None: return self.right def UpperCAmelCase_ ( self : Optional[Any] ) -> int: return self.height def UpperCAmelCase_ ( self : List[str] , lowercase_ : Any ) -> None: UpperCAmelCase : List[str] = data def UpperCAmelCase_ ( self : int , lowercase_ : MyNode | None ) -> None: UpperCAmelCase : Dict = node def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : MyNode | None ) -> None: UpperCAmelCase : Any = node def UpperCAmelCase_ ( self : Any , lowercase_ : int ) -> None: UpperCAmelCase : List[Any] = height def UpperCamelCase( UpperCAmelCase_ ): if node is None: return 0 return node.get_height() def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ): if a > b: return a return b def UpperCamelCase( UpperCAmelCase_ ): print('left rotation node:' , node.get_data() ) UpperCAmelCase : List[str] = node.get_left() assert ret is not None node.set_left(ret.get_right() ) ret.set_right(UpperCAmelCase_ ) UpperCAmelCase : str = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(UpperCAmelCase_ ) UpperCAmelCase : str = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(UpperCAmelCase_ ) return ret def UpperCamelCase( UpperCAmelCase_ ): print('right rotation node:' , node.get_data() ) UpperCAmelCase : Dict = node.get_right() assert ret is not None node.set_right(ret.get_left() ) ret.set_left(UpperCAmelCase_ ) UpperCAmelCase : Tuple = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(UpperCAmelCase_ ) UpperCAmelCase : Tuple = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(UpperCAmelCase_ ) return ret def UpperCamelCase( UpperCAmelCase_ ): UpperCAmelCase : List[str] = node.get_left() assert left_child is not None node.set_left(left_rotation(UpperCAmelCase_ ) ) return right_rotation(UpperCAmelCase_ ) def UpperCamelCase( UpperCAmelCase_ ): UpperCAmelCase : List[Any] = node.get_right() assert right_child is not None node.set_right(right_rotation(UpperCAmelCase_ ) ) return left_rotation(UpperCAmelCase_ ) def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ): if node is None: return MyNode(UpperCAmelCase_ ) if data < node.get_data(): node.set_left(insert_node(node.get_left() , UpperCAmelCase_ ) ) if ( 
get_height(node.get_left() ) - get_height(node.get_right() ) == 2 ): # an unbalance detected UpperCAmelCase : Union[str, Any] = node.get_left() assert left_child is not None if ( data < left_child.get_data() ): # new node is the left child of the left child UpperCAmelCase : List[str] = right_rotation(UpperCAmelCase_ ) else: UpperCAmelCase : int = lr_rotation(UpperCAmelCase_ ) else: node.set_right(insert_node(node.get_right() , UpperCAmelCase_ ) ) if get_height(node.get_right() ) - get_height(node.get_left() ) == 2: UpperCAmelCase : str = node.get_right() assert right_child is not None if data < right_child.get_data(): UpperCAmelCase : str = rl_rotation(UpperCAmelCase_ ) else: UpperCAmelCase : Optional[Any] = left_rotation(UpperCAmelCase_ ) UpperCAmelCase : Optional[Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(UpperCAmelCase_ ) return node def UpperCamelCase( UpperCAmelCase_ ): while True: UpperCAmelCase : int = root.get_right() if right_child is None: break UpperCAmelCase : Union[str, Any] = right_child return root.get_data() def UpperCamelCase( UpperCAmelCase_ ): while True: UpperCAmelCase : List[str] = root.get_left() if left_child is None: break UpperCAmelCase : Optional[int] = left_child return root.get_data() def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ): UpperCAmelCase : Optional[Any] = root.get_left() UpperCAmelCase : List[Any] = root.get_right() if root.get_data() == data: if left_child is not None and right_child is not None: UpperCAmelCase : List[Any] = get_left_most(UpperCAmelCase_ ) root.set_data(UpperCAmelCase_ ) root.set_right(del_node(UpperCAmelCase_ , UpperCAmelCase_ ) ) elif left_child is not None: UpperCAmelCase : List[str] = left_child elif right_child is not None: UpperCAmelCase : Optional[int] = right_child else: return None elif root.get_data() > data: if left_child is None: print('No such data' ) return root else: root.set_left(del_node(UpperCAmelCase_ , UpperCAmelCase_ ) ) else: # root.get_data() < data if right_child is None: return root else: root.set_right(del_node(UpperCAmelCase_ , UpperCAmelCase_ ) ) if get_height(UpperCAmelCase_ ) - get_height(UpperCAmelCase_ ) == 2: assert right_child is not None if get_height(right_child.get_right() ) > get_height(right_child.get_left() ): UpperCAmelCase : List[str] = left_rotation(UpperCAmelCase_ ) else: UpperCAmelCase : int = rl_rotation(UpperCAmelCase_ ) elif get_height(UpperCAmelCase_ ) - get_height(UpperCAmelCase_ ) == -2: assert left_child is not None if get_height(left_child.get_left() ) > get_height(left_child.get_right() ): UpperCAmelCase : str = right_rotation(UpperCAmelCase_ ) else: UpperCAmelCase : Any = lr_rotation(UpperCAmelCase_ ) UpperCAmelCase : List[str] = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1 root.set_height(UpperCAmelCase_ ) return root class A_ : '''simple docstring''' def __init__( self : List[str] ) -> None: UpperCAmelCase : MyNode | None = None def UpperCAmelCase_ ( self : Optional[Any] ) -> int: return get_height(self.root ) def UpperCAmelCase_ ( self : Tuple , lowercase_ : Any ) -> None: print('insert:' + str(lowercase_ ) ) UpperCAmelCase : Tuple = insert_node(self.root , lowercase_ ) def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Any ) -> None: print('delete:' + str(lowercase_ ) ) if self.root is None: print('Tree is empty!' 
) return UpperCAmelCase : Tuple = del_node(self.root , lowercase_ ) def __str__( self : Optional[int] , ) -> str: # a level traversale, gives a more intuitive look on the tree UpperCAmelCase : Dict = '' UpperCAmelCase : Tuple = MyQueue() q.push(self.root ) UpperCAmelCase : Dict = self.get_height() if layer == 0: return output UpperCAmelCase : List[Any] = 0 while not q.is_empty(): UpperCAmelCase : Dict = q.pop() UpperCAmelCase : Dict = ' ' * int(math.pow(2 , layer - 1 ) ) output += space if node is None: output += "*" q.push(lowercase_ ) q.push(lowercase_ ) else: output += str(node.get_data() ) q.push(node.get_left() ) q.push(node.get_right() ) output += space UpperCAmelCase : Dict = cnt + 1 for i in range(100 ): if cnt == math.pow(2 , lowercase_ ) - 1: UpperCAmelCase : Optional[int] = layer - 1 if layer == 0: output += "\n*************************************" return output output += "\n" break output += "\n*************************************" return output def UpperCamelCase( ): import doctest doctest.testmod() if __name__ == "__main__": _test() lowercase__ = AVLtree() lowercase__ = list(range(10)) random.shuffle(lst) for i in lst: t.insert(i) print(str(t)) random.shuffle(lst) for i in lst: t.del_node(i) print(str(t))
280
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase__ = logging.get_logger(__name__) def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ): UpperCAmelCase : Optional[Any] = b.T UpperCAmelCase : Optional[int] = np.sum(np.square(UpperCAmelCase_ ) , axis=1 ) UpperCAmelCase : List[Any] = np.sum(np.square(UpperCAmelCase_ ) , axis=0 ) UpperCAmelCase : List[str] = np.matmul(UpperCAmelCase_ , UpperCAmelCase_ ) UpperCAmelCase : Union[str, Any] = aa[:, None] - 2 * ab + ba[None, :] return d def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ): UpperCAmelCase : int = x.reshape(-1 , 3 ) UpperCAmelCase : Optional[int] = squared_euclidean_distance(UpperCAmelCase_ , UpperCAmelCase_ ) return np.argmin(UpperCAmelCase_ , axis=1 ) class A_ ( _snake_case ): '''simple docstring''' UpperCAmelCase_ : List[Any] = ["""pixel_values"""] def __init__( self : List[Any] , lowercase_ : Optional[Union[List[List[int]], np.ndarray]] = None , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : bool = True , **lowercase_ : Optional[Any] , ) -> None: super().__init__(**lowercase_ ) UpperCAmelCase : Any = size if size is not None else {'height': 256, 'width': 256} UpperCAmelCase : List[Any] = get_size_dict(lowercase_ ) UpperCAmelCase : str = np.array(lowercase_ ) if clusters is not None else None UpperCAmelCase : Any = do_resize UpperCAmelCase : List[Any] = size UpperCAmelCase : Any = resample UpperCAmelCase : Dict = do_normalize UpperCAmelCase : List[Any] = do_color_quantize def UpperCAmelCase_ ( self : int , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Any , ) -> np.ndarray: UpperCAmelCase : Dict = get_size_dict(lowercase_ ) if "height" not in size or "width" not in size: raise ValueError(f"""Size dictionary must contain both height and width keys. 
Got {size.keys()}""" ) return resize( lowercase_ , size=(size['height'], size['width']) , resample=lowercase_ , data_format=lowercase_ , **lowercase_ ) def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : np.ndarray , lowercase_ : Optional[Union[str, ChannelDimension]] = None , ) -> np.ndarray: UpperCAmelCase : int = rescale(image=lowercase_ , scale=1 / 127.5 , data_format=lowercase_ ) UpperCAmelCase : Dict = image - 1 return image def UpperCAmelCase_ ( self : str , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[Union[List[List[int]], np.ndarray]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **lowercase_ : List[str] , ) -> PIL.Image.Image: UpperCAmelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize UpperCAmelCase : Optional[Any] = size if size is not None else self.size UpperCAmelCase : Optional[int] = get_size_dict(lowercase_ ) UpperCAmelCase : Any = resample if resample is not None else self.resample UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase : str = do_color_quantize if do_color_quantize is not None else self.do_color_quantize UpperCAmelCase : Optional[int] = clusters if clusters is not None else self.clusters UpperCAmelCase : List[str] = np.array(lowercase_ ) UpperCAmelCase : int = make_list_of_images(lowercase_ ) if not valid_images(lowercase_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_color_quantize and clusters is None: raise ValueError('Clusters must be specified if do_color_quantize is True.' ) # All transformations expect numpy arrays. UpperCAmelCase : Dict = [to_numpy_array(lowercase_ ) for image in images] if do_resize: UpperCAmelCase : List[Any] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images] if do_normalize: UpperCAmelCase : Tuple = [self.normalize(image=lowercase_ ) for image in images] if do_color_quantize: UpperCAmelCase : List[str] = [to_channel_dimension_format(lowercase_ , ChannelDimension.LAST ) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) UpperCAmelCase : int = np.array(lowercase_ ) UpperCAmelCase : str = color_quantize(lowercase_ , lowercase_ ).reshape(images.shape[:-1] ) # flatten to (batch_size, height*width) UpperCAmelCase : Optional[int] = images.shape[0] UpperCAmelCase : Union[str, Any] = images.reshape(lowercase_ , -1 ) # We need to convert back to a list of images to keep consistent behaviour across processors. UpperCAmelCase : int = list(lowercase_ ) else: UpperCAmelCase : Dict = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images] UpperCAmelCase : Any = {'input_ids': images} return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
280
1
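The image processor in the record above quantizes pixels by nearest palette cluster under squared Euclidean distance, computed via the expansion ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2. A compact sketch of that computation; the palette entries and pixels are made up for the example.

import numpy as np


def squared_euclidean_distance(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    bt = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(bt), axis=0)
    ab = np.matmul(a, bt)
    return a2[:, None] - 2 * ab + b2[None, :]


# two illustrative palette "clusters" (one red-ish, one blue-ish)
clusters = np.array([[250.0, 10.0, 10.0], [10.0, 10.0, 250.0]])
pixels = np.array([[255.0, 0.0, 0.0], [0.0, 0.0, 200.0], [200.0, 30.0, 20.0]])

d = squared_euclidean_distance(pixels, clusters)
print(np.argmin(d, axis=1))  # -> [0 1 0]: nearest palette index per pixel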
"""simple docstring""" import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline __lowerCamelCase = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False) parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not") parser.add_argument("--steps", default=None, type=int, help="Num inference steps") __lowerCamelCase = parser.parse_args() __lowerCamelCase = "cpu" __lowerCamelCase = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings" __lowerCamelCase = "path-to-your-trained-model" __lowerCamelCase = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: __lowerCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) __lowerCamelCase = pipe.to(device) # to channels last __lowerCamelCase = pipe.unet.to(memory_format=torch.channels_last) __lowerCamelCase = pipe.vae.to(memory_format=torch.channels_last) __lowerCamelCase = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: __lowerCamelCase = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex __lowerCamelCase = torch.randn(2, 4, 64, 64) __lowerCamelCase = torch.rand(1) * 9_99 __lowerCamelCase = torch.randn(2, 77, 7_68) __lowerCamelCase = (sample, timestep, encoder_hidden_status) try: __lowerCamelCase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: __lowerCamelCase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) __lowerCamelCase = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) __lowerCamelCase = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: __lowerCamelCase = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute __lowerCamelCase = 6_66 __lowerCamelCase = torch.Generator(device).manual_seed(seed) __lowerCamelCase = {"generator": generator} if args.steps is not None: __lowerCamelCase = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): __lowerCamelCase = pipe(prompt, **generate_kwargs).images[0] # save image image.save("generated.png")
221
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase__: def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=13 ,__UpperCAmelCase=32 ,__UpperCAmelCase=3 ,__UpperCAmelCase=4 ,__UpperCAmelCase=[10, 20, 30, 40] ,__UpperCAmelCase=[2, 2, 3, 2] ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=37 ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=10 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=["stage2", "stage3", "stage4"] ,__UpperCAmelCase=3 ,__UpperCAmelCase=None ,) -> Optional[int]: A__ = parent A__ = batch_size A__ = image_size A__ = num_channels A__ = num_stages A__ = hidden_sizes A__ = depths A__ = is_training A__ = use_labels A__ = intermediate_size A__ = hidden_act A__ = type_sequence_label_size A__ = initializer_range A__ = out_features A__ = num_labels A__ = scope A__ = num_stages def snake_case__ ( self ) -> List[Any]: A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A__ = self.get_config() return config, pixel_values, labels def snake_case__ ( self ) -> str: return ConvNextConfig( num_channels=self.num_channels ,num_stages=self.num_stages ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,is_training=self.is_training ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,out_features=self.out_features ,) def snake_case__ ( self ) -> Tuple: return UperNetConfig( backbone_config=self.get_backbone_config() ,hidden_size=5_12 ,pool_scales=[1, 2, 3, 6] ,use_auxiliary_head=__UpperCAmelCase ,auxiliary_loss_weight=0.4 ,auxiliary_in_channels=40 ,auxiliary_channels=2_56 ,auxiliary_num_convs=1 ,auxiliary_concat_input=__UpperCAmelCase ,loss_ignore_index=2_55 ,num_labels=self.num_labels ,) def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict: A__ = UperNetForSemanticSegmentation(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() A__ = model(__UpperCAmelCase ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_labels, self.image_size, self.image_size) ) def snake_case__ ( self ) -> str: A__ = self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ) = config_and_inputs A__ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__( __A , __A , unittest.TestCase ): lowerCAmelCase__ : int = (UperNetForSemanticSegmentation,) if is_torch_available() else () lowerCAmelCase__ : int = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {} lowerCAmelCase__ : Optional[int] = False lowerCAmelCase__ : List[Any] = False lowerCAmelCase__ : Tuple = False lowerCAmelCase__ : Optional[Any] = False lowerCAmelCase__ : 
Union[str, Any] = False lowerCAmelCase__ : Dict = False def snake_case__ ( self ) -> Union[str, Any]: A__ = UperNetModelTester(self ) A__ = ConfigTester(self ,config_class=__UpperCAmelCase ,has_text_modality=__UpperCAmelCase ,hidden_size=37 ) def snake_case__ ( self ) -> List[Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case__ ( self ) -> int: return def snake_case__ ( self ) -> List[Any]: A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(__UpperCAmelCase ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['pixel_values'] self.assertListEqual(arg_names[:1] ,__UpperCAmelCase ) def snake_case__ ( self ) -> Tuple: A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase ) @unittest.skip(reason='UperNet does not use inputs_embeds' ) def snake_case__ ( self ) -> Optional[int]: pass @unittest.skip(reason='UperNet does not support input and output embeddings' ) def snake_case__ ( self ) -> Tuple: pass @unittest.skip(reason='UperNet does not have a base model' ) def snake_case__ ( self ) -> List[Any]: pass @unittest.skip(reason='UperNet does not have a base model' ) def snake_case__ ( self ) -> Dict: pass @require_torch_multi_gpu @unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def snake_case__ ( self ) -> Any: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def snake_case__ ( self ) -> Dict: pass def snake_case__ ( self ) -> Optional[int]: def check_hidden_states_output(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ): A__ = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(__UpperCAmelCase ,__UpperCAmelCase ) ) A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A__ = self.model_tester.num_stages self.assertEqual(len(__UpperCAmelCase ) ,expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,) A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = True check_hidden_states_output(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) def snake_case__ ( self ) -> Optional[Any]: A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = _config_zero_init(__UpperCAmelCase ) A__ = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: A__ = model_class(config=__UpperCAmelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' ,) @unittest.skip(reason='UperNet does not have tied weights' ) def snake_case__ ( self ) -> str: pass @slow def snake_case__ ( self ) -> List[str]: for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = UperNetForSemanticSegmentation.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def UpperCAmelCase ( ): """simple docstring""" A__ = hf_hub_download( repo_id='hf-internal-testing/fixtures_ade20k' , repo_type='dataset' , filename='ADE_val_00000001.jpg' ) A__ = Image.open(UpperCamelCase__ ).convert('RGB' ) return image @require_torch @require_vision @slow class UpperCamelCase__( unittest.TestCase ): def snake_case__ ( self ) -> Dict: A__ = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' ) A__ = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(__UpperCAmelCase ) A__ = prepare_img() A__ = processor(images=__UpperCAmelCase ,return_tensors='pt' ).to(__UpperCAmelCase ) with torch.no_grad(): A__ = model(**__UpperCAmelCase ) A__ = torch.Size((1, model.config.num_labels, 5_12, 5_12) ) self.assertEqual(outputs.logits.shape ,__UpperCAmelCase ) A__ = torch.tensor( [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] ,__UpperCAmelCase ,atol=1e-4 ) ) def snake_case__ ( self ) -> str: A__ = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' ) A__ = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(__UpperCAmelCase ) A__ = prepare_img() A__ = processor(images=__UpperCAmelCase ,return_tensors='pt' ).to(__UpperCAmelCase ) with torch.no_grad(): A__ = model(**__UpperCAmelCase ) A__ = torch.Size((1, model.config.num_labels, 5_12, 5_12) ) self.assertEqual(outputs.logits.shape 
,__UpperCAmelCase ) A__ = torch.tensor( [[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] ,__UpperCAmelCase ,atol=1e-4 ) )
221
1
import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
328
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
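# Illustrative sketch (not from the file above): the _LazyModule registration
# keeps `import transformers` cheap by deferring heavy torch/TF submodule
# imports until a listed name is first accessed. A minimal standalone version
# of the same idea via PEP 562 module __getattr__, with hypothetical names:
import importlib

_import_structure = {"heavy_module": ["big_function"]}


def __getattr__(name):
    # Import the owning submodule on first access, then return the attribute.
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")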
328
1
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def _UpperCAmelCase ( snake_case ): """simple docstring""" if isinstance(snake_case , collections.abc.Iterable ): return x return (x, x) @require_tf class __lowerCAmelCase : def snake_case ( self , _snake_case , _snake_case ): """simple docstring""" pass def snake_case ( self ): """simple docstring""" pass def snake_case ( self ): """simple docstring""" pass def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ): """simple docstring""" _lowerCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_snake_case , _snake_case ) _lowerCAmelCase = TFVisionTextDualEncoderModel(_snake_case ) _lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case ) _lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case ) _lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case ) _lowerCAmelCase = {"""vision_model""": vision_model, """text_model""": text_model} _lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_snake_case ) _lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case ) _lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_snake_case , 
text_model=_snake_case ) _lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case ) _lowerCAmelCase = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_snake_case ) _lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_snake_case ) _lowerCAmelCase = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case ) _lowerCAmelCase = after_output[0].numpy() _lowerCAmelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_snake_case , 1e-5 ) def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case ) _lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case ) _lowerCAmelCase = model( input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , output_attentions=_snake_case ) _lowerCAmelCase = output.vision_model_output.attentions self.assertEqual(len(_snake_case ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) _lowerCAmelCase = to_atuple(vision_model.config.image_size ) _lowerCAmelCase = to_atuple(vision_model.config.patch_size ) _lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) _lowerCAmelCase = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) _lowerCAmelCase = output.text_model_output.attentions self.assertEqual(len(_snake_case ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def snake_case ( self , _snake_case , _snake_case , _snake_case ): """simple docstring""" _lowerCAmelCase = np.abs((a - b) ).max() self.assertLessEqual(_snake_case , _snake_case , F'Difference between torch and flax is {diff} (>= {tol}).' 
) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**_snake_case ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**_snake_case ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_snake_case ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = self.prepare_config_and_inputs() self.check_save_load(**_snake_case ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_snake_case ) @slow def snake_case ( self ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = self.get_pretrained_model_and_inputs() _lowerCAmelCase = model_a(**_snake_case ) _lowerCAmelCase = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_snake_case ) _lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained(_snake_case ) _lowerCAmelCase = model_a(**_snake_case ) _lowerCAmelCase = after_outputs[0].numpy() _lowerCAmelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_snake_case , 1e-5 ) @require_tf class __lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ): def snake_case ( self ): """simple docstring""" _lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" ) _lowerCAmelCase = 13 _lowerCAmelCase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) _lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) _lowerCAmelCase = random_attention_mask([batch_size, 4] ) _lowerCAmelCase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def snake_case ( self , _snake_case , _snake_case ): """simple docstring""" _lowerCAmelCase = TFViTModel(_snake_case , name="""vision_model""" ) _lowerCAmelCase = TFBertModel(_snake_case , name="""text_model""" ) return vision_model, text_model def snake_case ( self ): """simple docstring""" _lowerCAmelCase = TFViTModelTester(self ) _lowerCAmelCase = TFBertModelTester(self ) _lowerCAmelCase = vit_model_tester.prepare_config_and_inputs() _lowerCAmelCase = bert_model_tester.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = vision_config_and_inputs ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class __lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ): def snake_case ( self ): """simple docstring""" _lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" ) _lowerCAmelCase = 13 _lowerCAmelCase = floats_tensor( [ batch_size, 
model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) _lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) _lowerCAmelCase = random_attention_mask([batch_size, 4] ) _lowerCAmelCase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = self.get_vision_text_model(_snake_case , _snake_case ) _lowerCAmelCase = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case ) _lowerCAmelCase = model( input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , output_attentions=_snake_case ) _lowerCAmelCase = output.vision_model_output.attentions self.assertEqual(len(_snake_case ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) _lowerCAmelCase = to_atuple(vision_model.config.image_size ) _lowerCAmelCase = to_atuple(vision_model.config.patch_size ) _lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) _lowerCAmelCase = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) _lowerCAmelCase = output.text_model_output.attentions self.assertEqual(len(_snake_case ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def snake_case ( self , _snake_case , _snake_case ): """simple docstring""" _lowerCAmelCase = TFDeiTModel(_snake_case , name="""vision_model""" ) _lowerCAmelCase = TFRobertaModel(_snake_case , name="""text_model""" ) return vision_model, text_model def snake_case ( self ): """simple docstring""" _lowerCAmelCase = TFDeiTModelTester(self ) _lowerCAmelCase = TFRobertaModelTester(self ) _lowerCAmelCase = vit_model_tester.prepare_config_and_inputs() _lowerCAmelCase = bert_model_tester.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = vision_config_and_inputs ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class __lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ): def snake_case ( self ): """simple docstring""" _lowerCAmelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" ) _lowerCAmelCase = 13 _lowerCAmelCase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) _lowerCAmelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) _lowerCAmelCase = random_attention_mask([batch_size, 4] ) _lowerCAmelCase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def 
snake_case ( self , _snake_case , _snake_case ): """simple docstring""" _lowerCAmelCase = TFCLIPVisionModel(_snake_case , name="""vision_model""" ) _lowerCAmelCase = TFBertModel(_snake_case , name="""text_model""" ) return vision_model, text_model def snake_case ( self ): """simple docstring""" _lowerCAmelCase = TFCLIPVisionModelTester(self ) _lowerCAmelCase = TFBertModelTester(self ) _lowerCAmelCase = clip_model_tester.prepare_config_and_inputs() _lowerCAmelCase = bert_model_tester.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase = vision_config_and_inputs ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class __lowerCAmelCase ( unittest.TestCase ): @slow def snake_case ( self ): """simple docstring""" _lowerCAmelCase = TFVisionTextDualEncoderModel.from_pretrained( """clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_snake_case ) _lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) _lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) _lowerCAmelCase = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=_snake_case , padding=_snake_case , return_tensors="""np""" ) _lowerCAmelCase = model(**_snake_case ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) _lowerCAmelCase = np.array([[1.228_4727, 0.310_4122]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _snake_case , atol=1e-3 ) )
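# Illustrative worked example (not part of the test file above): the
# attention-shape checks rely on a simple sequence-length formula; the
# dimensions below are made up for illustration.
image_size, patch_size = (30, 30), (2, 2)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])  # 225
vit_seq_len = num_patches + 1   # ViT prepends a [CLS] token
deit_seq_len = num_patches + 2  # DeiT prepends [CLS] and a distillation token
print(vit_seq_len, deit_seq_len)  # 226 227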
82
import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ): __lowerCamelCase = AudioLDMPipeline __lowerCamelCase = TEXT_TO_AUDIO_PARAMS __lowerCamelCase = TEXT_TO_AUDIO_BATCH_PARAMS __lowerCamelCase = frozenset( [ '''num_inference_steps''', '''num_waveforms_per_prompt''', '''generator''', '''latents''', '''output_type''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def snake_case ( self ): """simple docstring""" torch.manual_seed(0 ) _lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=_snake_case , ) _lowerCAmelCase = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_snake_case , set_alpha_to_one=_snake_case , ) torch.manual_seed(0 ) _lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) _lowerCAmelCase = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , ) _lowerCAmelCase = ClapTextModelWithProjection(_snake_case ) _lowerCAmelCase = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 ) _lowerCAmelCase = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_snake_case , ) _lowerCAmelCase = SpeechTaHifiGan(_snake_case ) _lowerCAmelCase = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """vocoder""": vocoder, } return components def snake_case ( self , _snake_case , _snake_case=0 ): """simple docstring""" if str(_snake_case ).startswith("""mps""" ): _lowerCAmelCase = torch.manual_seed(_snake_case ) else: _lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) _lowerCAmelCase = { """prompt""": """A hammer hitting a wooden surface""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, } return inputs def snake_case ( self ): """simple docstring""" _lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase = self.get_dummy_components() 
_lowerCAmelCase = AudioLDMPipeline(**_snake_case ) _lowerCAmelCase = audioldm_pipe.to(_snake_case ) audioldm_pipe.set_progress_bar_config(disable=_snake_case ) _lowerCAmelCase = self.get_dummy_inputs(_snake_case ) _lowerCAmelCase = audioldm_pipe(**_snake_case ) _lowerCAmelCase = output.audios[0] assert audio.ndim == 1 assert len(_snake_case ) == 256 _lowerCAmelCase = audio[:10] _lowerCAmelCase = np.array( [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def snake_case ( self ): """simple docstring""" _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = AudioLDMPipeline(**_snake_case ) _lowerCAmelCase = audioldm_pipe.to(_snake_case ) _lowerCAmelCase = audioldm_pipe.to(_snake_case ) audioldm_pipe.set_progress_bar_config(disable=_snake_case ) _lowerCAmelCase = self.get_dummy_inputs(_snake_case ) _lowerCAmelCase = 3 * [inputs["""prompt"""]] # forward _lowerCAmelCase = audioldm_pipe(**_snake_case ) _lowerCAmelCase = output.audios[0] _lowerCAmelCase = self.get_dummy_inputs(_snake_case ) _lowerCAmelCase = 3 * [inputs.pop("""prompt""" )] _lowerCAmelCase = audioldm_pipe.tokenizer( _snake_case , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_snake_case , return_tensors="""pt""" , ) _lowerCAmelCase = text_inputs["""input_ids"""].to(_snake_case ) _lowerCAmelCase = audioldm_pipe.text_encoder( _snake_case , ) _lowerCAmelCase = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state _lowerCAmelCase = F.normalize(_snake_case , dim=-1 ) _lowerCAmelCase = prompt_embeds # forward _lowerCAmelCase = audioldm_pipe(**_snake_case ) _lowerCAmelCase = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def snake_case ( self ): """simple docstring""" _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = AudioLDMPipeline(**_snake_case ) _lowerCAmelCase = audioldm_pipe.to(_snake_case ) _lowerCAmelCase = audioldm_pipe.to(_snake_case ) audioldm_pipe.set_progress_bar_config(disable=_snake_case ) _lowerCAmelCase = self.get_dummy_inputs(_snake_case ) _lowerCAmelCase = 3 * ["""this is a negative prompt"""] _lowerCAmelCase = negative_prompt _lowerCAmelCase = 3 * [inputs["""prompt"""]] # forward _lowerCAmelCase = audioldm_pipe(**_snake_case ) _lowerCAmelCase = output.audios[0] _lowerCAmelCase = self.get_dummy_inputs(_snake_case ) _lowerCAmelCase = 3 * [inputs.pop("""prompt""" )] _lowerCAmelCase = [] for p in [prompt, negative_prompt]: _lowerCAmelCase = audioldm_pipe.tokenizer( _snake_case , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_snake_case , return_tensors="""pt""" , ) _lowerCAmelCase = text_inputs["""input_ids"""].to(_snake_case ) _lowerCAmelCase = audioldm_pipe.text_encoder( _snake_case , ) _lowerCAmelCase = text_embeds.text_embeds # additional L_2 normalization over each hidden-state _lowerCAmelCase = F.normalize(_snake_case , dim=-1 ) embeds.append(_snake_case ) _lowerCAmelCase , _lowerCAmelCase = embeds # forward _lowerCAmelCase = audioldm_pipe(**_snake_case ) _lowerCAmelCase = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def snake_case ( self ): """simple docstring""" _lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = PNDMScheduler(skip_prk_steps=_snake_case ) _lowerCAmelCase = AudioLDMPipeline(**_snake_case ) _lowerCAmelCase = 
audioldm_pipe.to(_snake_case ) audioldm_pipe.set_progress_bar_config(disable=_snake_case ) _lowerCAmelCase = self.get_dummy_inputs(_snake_case ) _lowerCAmelCase = """egg cracking""" _lowerCAmelCase = audioldm_pipe(**_snake_case , negative_prompt=_snake_case ) _lowerCAmelCase = output.audios[0] assert audio.ndim == 1 assert len(_snake_case ) == 256 _lowerCAmelCase = audio[:10] _lowerCAmelCase = np.array( [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def snake_case ( self ): """simple docstring""" _lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = PNDMScheduler(skip_prk_steps=_snake_case ) _lowerCAmelCase = AudioLDMPipeline(**_snake_case ) _lowerCAmelCase = audioldm_pipe.to(_snake_case ) audioldm_pipe.set_progress_bar_config(disable=_snake_case ) _lowerCAmelCase = """A hammer hitting a wooden surface""" # test num_waveforms_per_prompt=1 (default) _lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=2 ).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts _lowerCAmelCase = 2 _lowerCAmelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt _lowerCAmelCase = 2 _lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=2 , num_waveforms_per_prompt=_snake_case ).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts _lowerCAmelCase = 2 _lowerCAmelCase = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_snake_case ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = AudioLDMPipeline(**_snake_case ) _lowerCAmelCase = audioldm_pipe.to(_snake_case ) audioldm_pipe.set_progress_bar_config(disable=_snake_case ) _lowerCAmelCase = audioldm_pipe.vocoder.config.sampling_rate _lowerCAmelCase = self.get_dummy_inputs(_snake_case ) _lowerCAmelCase = audioldm_pipe(audio_length_in_s=0.016 , **_snake_case ) _lowerCAmelCase = output.audios[0] assert audio.ndim == 1 assert len(_snake_case ) / vocoder_sampling_rate == 0.016 _lowerCAmelCase = audioldm_pipe(audio_length_in_s=0.032 , **_snake_case ) _lowerCAmelCase = output.audios[0] assert audio.ndim == 1 assert len(_snake_case ) / vocoder_sampling_rate == 0.032 def snake_case ( self ): """simple docstring""" _lowerCAmelCase = self.get_dummy_components() _lowerCAmelCase = AudioLDMPipeline(**_snake_case ) _lowerCAmelCase = audioldm_pipe.to(_snake_case ) audioldm_pipe.set_progress_bar_config(disable=_snake_case ) _lowerCAmelCase = ["""hey"""] _lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=1 ) _lowerCAmelCase = output.audios.shape assert audio_shape == (1, 256) _lowerCAmelCase = audioldm_pipe.vocoder.config config.model_in_dim *= 2 _lowerCAmelCase = SpeechTaHifiGan(_snake_case ).to(_snake_case ) _lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=1 ) _lowerCAmelCase = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def snake_case ( self ): 
"""simple docstring""" self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_snake_case ) def snake_case ( self ): """simple docstring""" self._test_inference_batch_single_identical(test_mean_pixel_difference=_snake_case ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def snake_case ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case ) @slow class __lowerCAmelCase ( unittest.TestCase ): def snake_case ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self , _snake_case , _snake_case="cpu" , _snake_case=torch.floataa , _snake_case=0 ): """simple docstring""" _lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) _lowerCAmelCase = np.random.RandomState(_snake_case ).standard_normal((1, 8, 128, 16) ) _lowerCAmelCase = torch.from_numpy(_snake_case ).to(device=_snake_case , dtype=_snake_case ) _lowerCAmelCase = { """prompt""": """A hammer hitting a wooden surface""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 2.5, } return inputs def snake_case ( self ): """simple docstring""" _lowerCAmelCase = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" ) _lowerCAmelCase = audioldm_pipe.to(_snake_case ) audioldm_pipe.set_progress_bar_config(disable=_snake_case ) _lowerCAmelCase = self.get_inputs(_snake_case ) _lowerCAmelCase = 25 _lowerCAmelCase = audioldm_pipe(**_snake_case ).audios[0] assert audio.ndim == 1 assert len(_snake_case ) == 81920 _lowerCAmelCase = audio[77230:77240] _lowerCAmelCase = np.array( [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] ) _lowerCAmelCase = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1e-2 def snake_case ( self ): """simple docstring""" _lowerCAmelCase = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" ) _lowerCAmelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) _lowerCAmelCase = audioldm_pipe.to(_snake_case ) audioldm_pipe.set_progress_bar_config(disable=_snake_case ) _lowerCAmelCase = self.get_inputs(_snake_case ) _lowerCAmelCase = audioldm_pipe(**_snake_case ).audios[0] assert audio.ndim == 1 assert len(_snake_case ) == 81920 _lowerCAmelCase = audio[27780:27790] _lowerCAmelCase = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] ) _lowerCAmelCase = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3e-2
82
1
"""simple docstring""" from maths.prime_factors import prime_factors def __lowerCamelCase ( __UpperCamelCase ) -> int: """simple docstring""" if not isinstance(__UpperCamelCase , __UpperCamelCase ): lowerCAmelCase_ : Dict = f'''Input value of [number={number}] must be an integer''' raise TypeError(__UpperCamelCase ) if number < 1: raise ValueError("Input must be a positive integer" ) return -1 if len(prime_factors(__UpperCamelCase ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
161
"""simple docstring""" import argparse import os import re lowercase__ = """src/transformers""" # Pattern that looks at the indentation in a line. lowercase__ = re.compile(r"""^(\s*)\S""") # Pattern that matches `"key":" and puts `key` in group 0. lowercase__ = re.compile(r"""^\s*\"([^\"]+)\":""") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. lowercase__ = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""") # Pattern that matches `"key",` and puts `key` in group 0. lowercase__ = re.compile(r"""^\s*\"([^\"]+)\",\s*$""") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. lowercase__ = re.compile(r"""\[([^\]]+)\]""") def __lowerCamelCase ( __UpperCamelCase ) -> int: """simple docstring""" lowerCAmelCase_ : Union[str, Any] = _re_indent.search(__UpperCamelCase ) return "" if search is None else search.groups()[0] def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase="" , __UpperCamelCase=None , __UpperCamelCase=None ) -> str: """simple docstring""" lowerCAmelCase_ : List[Any] = 0 lowerCAmelCase_ : Dict = code.split("\n" ) if start_prompt is not None: while not lines[index].startswith(__UpperCamelCase ): index += 1 lowerCAmelCase_ : Dict = ["\n".join(lines[:index] )] else: lowerCAmelCase_ : List[Any] = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). lowerCAmelCase_ : Optional[Any] = [lines[index]] index += 1 while index < len(__UpperCamelCase ) and (end_prompt is None or not lines[index].startswith(__UpperCamelCase )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(__UpperCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ): current_block.append(lines[index] ) blocks.append("\n".join(__UpperCamelCase ) ) if index < len(__UpperCamelCase ) - 1: lowerCAmelCase_ : List[Any] = [lines[index + 1]] index += 1 else: lowerCAmelCase_ : Any = [] else: blocks.append("\n".join(__UpperCamelCase ) ) lowerCAmelCase_ : Any = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(__UpperCamelCase ) > 0: blocks.append("\n".join(__UpperCamelCase ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(__UpperCamelCase ): blocks.append("\n".join(lines[index:] ) ) return blocks def __lowerCamelCase ( __UpperCamelCase ) -> Any: """simple docstring""" def _inner(__UpperCamelCase ): return key(__UpperCamelCase ).lower().replace("_" , "" ) return _inner def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase=None ) -> List[str]: """simple docstring""" def noop(__UpperCamelCase ): return x if key is None: lowerCAmelCase_ : Optional[int] = noop # Constants are all uppercase, they go first. lowerCAmelCase_ : str = [obj for obj in objects if key(__UpperCamelCase ).isupper()] # Classes are not all uppercase but start with a capital, they go second. lowerCAmelCase_ : str = [obj for obj in objects if key(__UpperCamelCase )[0].isupper() and not key(__UpperCamelCase ).isupper()] # Functions begin with a lowercase, they go last. 
lowerCAmelCase_ : int = [obj for obj in objects if not key(__UpperCamelCase )[0].isupper()] lowerCAmelCase_ : Dict = ignore_underscore(__UpperCamelCase ) return sorted(__UpperCamelCase , key=__UpperCamelCase ) + sorted(__UpperCamelCase , key=__UpperCamelCase ) + sorted(__UpperCamelCase , key=__UpperCamelCase ) def __lowerCamelCase ( __UpperCamelCase ) -> List[str]: """simple docstring""" def _replace(__UpperCamelCase ): lowerCAmelCase_ : Tuple = match.groups()[0] if "," not in imports: return f'''[{imports}]''' lowerCAmelCase_ : Optional[int] = [part.strip().replace("\"" , "" ) for part in imports.split("," )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: lowerCAmelCase_ : Optional[int] = keys[:-1] return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(__UpperCamelCase )] ) + "]" lowerCAmelCase_ : Union[str, Any] = import_statement.split("\n" ) if len(__UpperCamelCase ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. lowerCAmelCase_ : Optional[int] = 2 if lines[1].strip() == "[" else 1 lowerCAmelCase_ : Optional[Any] = [(i, _re_strip_line.search(__UpperCamelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] lowerCAmelCase_ : List[Any] = sort_objects(__UpperCamelCase , key=lambda __UpperCamelCase : x[1] ) lowerCAmelCase_ : List[str] = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(__UpperCamelCase ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: lowerCAmelCase_ : Dict = _re_bracket_content.sub(_replace , lines[1] ) else: lowerCAmelCase_ : Optional[Any] = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: lowerCAmelCase_ : Any = keys[:-1] lowerCAmelCase_ : Dict = get_indent(lines[1] ) + ", ".join([f'''"{k}"''' for k in sort_objects(__UpperCamelCase )] ) return "\n".join(__UpperCamelCase ) else: # Finally we have to deal with imports fitting on one line lowerCAmelCase_ : List[str] = _re_bracket_content.sub(_replace , __UpperCamelCase ) return import_statement def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase=True ) -> Optional[int]: """simple docstring""" with open(__UpperCamelCase , encoding="utf-8" ) as f: lowerCAmelCase_ : List[Any] = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 lowerCAmelCase_ : int = split_code_in_indented_blocks( __UpperCamelCase , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(__UpperCamelCase ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. lowerCAmelCase_ : Optional[int] = main_blocks[block_idx] lowerCAmelCase_ : Union[str, Any] = block.split("\n" ) # Get to the start of the imports. 
lowerCAmelCase_ : str = 0 while line_idx < len(__UpperCamelCase ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: lowerCAmelCase_ : Optional[int] = len(__UpperCamelCase ) else: line_idx += 1 if line_idx >= len(__UpperCamelCase ): continue # Ignore beginning and last line: they don't contain anything. lowerCAmelCase_ : Optional[Any] = "\n".join(block_lines[line_idx:-1] ) lowerCAmelCase_ : Union[str, Any] = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. lowerCAmelCase_ : Tuple = split_code_in_indented_blocks(__UpperCamelCase , indent_level=__UpperCamelCase ) # We have two categories of import key: list or _import_structure[key].append/extend lowerCAmelCase_ : List[Any] = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. lowerCAmelCase_ : Dict = [(pattern.search(__UpperCamelCase ).groups()[0] if pattern.search(__UpperCamelCase ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. lowerCAmelCase_ : Any = [(i, key) for i, key in enumerate(__UpperCamelCase ) if key is not None] lowerCAmelCase_ : Union[str, Any] = [x[0] for x in sorted(__UpperCamelCase , key=lambda __UpperCamelCase : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. lowerCAmelCase_ : Optional[Any] = 0 lowerCAmelCase_ : str = [] for i in range(len(__UpperCamelCase ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: lowerCAmelCase_ : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(__UpperCamelCase ) count += 1 # And we put our main block back together with its first and last line. lowerCAmelCase_ : Any = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(__UpperCamelCase ): if check_only: return True else: print(f'''Overwriting {file}.''' ) with open(__UpperCamelCase , "w" , encoding="utf-8" ) as f: f.write("\n".join(__UpperCamelCase ) ) def __lowerCamelCase ( __UpperCamelCase=True ) -> List[Any]: """simple docstring""" lowerCAmelCase_ : Any = [] for root, _, files in os.walk(__UpperCamelCase ): if "__init__.py" in files: lowerCAmelCase_ : Dict = sort_imports(os.path.join(__UpperCamelCase , "__init__.py" ) , check_only=__UpperCamelCase ) if result: lowerCAmelCase_ : Union[str, Any] = [os.path.join(__UpperCamelCase , "__init__.py" )] if len(__UpperCamelCase ) > 0: raise ValueError(f'''Would overwrite {len(__UpperCamelCase )} files, run `make style`.''' ) if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") lowercase__ = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
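# Illustrative re-creation (not part of the script above) of the ordering that
# sort_objects enforces: constants first, then classes, then functions, each
# group sorted case-insensitively with underscores ignored. Names are made up.
objs = ["zeta", "_alpha", "BETA", "Gamma", "delta", "EPSILON_X"]
key = lambda s: s.lower().replace("_", "")
constants = sorted([o for o in objs if o.isupper()], key=key)                       # ['BETA', 'EPSILON_X']
classes = sorted([o for o in objs if o[0].isupper() and not o.isupper()], key=key)  # ['Gamma']
functions = sorted([o for o in objs if not o[0].isupper()], key=key)                # ['_alpha', 'delta', 'zeta']
print(constants + classes + functions)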
161
1
import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : str = logging.get_logger(__name__) def __magic_name__ ( A : int ): '''simple docstring''' a = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: a = 128 elif "12-12" in model_name: a = 12 a = 12 elif "14-14" in model_name: a = 14 a = 14 elif "16-16" in model_name: a = 16 a = 16 else: raise ValueError("Model not supported" ) a = "huggingface/label-files" if "speech-commands" in model_name: a = 35 a = "speech-commands-v2-id2label.json" else: a = 527 a = "audioset-id2label.json" a = json.load(open(hf_hub_download(A, A, repo_type="dataset" ), "r" ) ) a = {int(A ): v for k, v in idalabel.items()} a = idalabel a = {v: k for k, v in idalabel.items()} return config def __magic_name__ ( A : Dict ): '''simple docstring''' if "module.v" in name: a = name.replace("module.v", "audio_spectrogram_transformer" ) if "cls_token" in name: a = name.replace("cls_token", "embeddings.cls_token" ) if "dist_token" in name: a = name.replace("dist_token", "embeddings.distillation_token" ) if "pos_embed" in name: a = name.replace("pos_embed", "embeddings.position_embeddings" ) if "patch_embed.proj" in name: a = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection" ) # transformer blocks if "blocks" in name: a = name.replace("blocks", "encoder.layer" ) if "attn.proj" in name: a = name.replace("attn.proj", "attention.output.dense" ) if "attn" in name: a = name.replace("attn", "attention.self" ) if "norm1" in name: a = name.replace("norm1", "layernorm_before" ) if "norm2" in name: a = name.replace("norm2", "layernorm_after" ) if "mlp.fc1" in name: a = name.replace("mlp.fc1", "intermediate.dense" ) if "mlp.fc2" in name: a = name.replace("mlp.fc2", "output.dense" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: a = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm" ) # classifier head if "module.mlp_head.0" in name: a = name.replace("module.mlp_head.0", "classifier.layernorm" ) if "module.mlp_head.1" in name: a = name.replace("module.mlp_head.1", "classifier.dense" ) return name def __magic_name__ ( A : Tuple, A : Optional[Any] ): '''simple docstring''' for key in orig_state_dict.copy().keys(): a = orig_state_dict.pop(A ) if "qkv" in key: a = key.split("." 
) a = int(key_split[3] ) a = config.hidden_size if "weight" in key: a = val[:dim, :] a = val[dim : dim * 2, :] a = val[-dim:, :] else: a = val[:dim] a = val[dim : dim * 2] a = val[-dim:] else: a = val return orig_state_dict def __magic_name__ ( A : Optional[int] ): '''simple docstring''' a = [ "module.v.head.weight", "module.v.head.bias", "module.v.head_dist.weight", "module.v.head_dist.bias", ] for k in ignore_keys: state_dict.pop(A, A ) @torch.no_grad() def __magic_name__ ( A : List[str], A : Optional[int], A : Any=False ): '''simple docstring''' a = get_audio_spectrogram_transformer_config(A ) a = { "ast-finetuned-audioset-10-10-0.4593": ( "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.450": ( "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448": ( "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448-v2": ( "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1" ), "ast-finetuned-audioset-12-12-0.447": ( "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1" ), "ast-finetuned-audioset-14-14-0.443": ( "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1" ), "ast-finetuned-audioset-16-16-0.442": ( "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1" ), "ast-finetuned-speech-commands-v2": ( "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1" ), } # load original state_dict a = model_name_to_url[model_name] a = torch.hub.load_state_dict_from_url(A, map_location="cpu" ) # remove some keys remove_keys(A ) # rename some keys a = convert_state_dict(A, A ) # load 🤗 model a = ASTForAudioClassification(A ) model.eval() model.load_state_dict(A ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 a = -4.2_67_73_93 if "speech-commands" not in model_name else -6.84_59_78 a = 4.5_68_99_74 if "speech-commands" not in model_name else 5.5_65_45_26 a = 1024 if "speech-commands" not in model_name else 128 a = ASTFeatureExtractor(mean=A, std=A, max_length=A ) if "speech-commands" in model_name: a = load_dataset("speech_commands", "v0.02", split="validation" ) a = dataset[0]["audio"]["array"] else: a = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset", ) a , a = torchaudio.load(A ) a = waveform.squeeze().numpy() a = feature_extractor(A, sampling_rate=16000, return_tensors="pt" ) # forward pass a = model(**A ) a = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": a = torch.tensor([-0.87_60, -7.00_42, -8.66_02] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": a = torch.tensor([-1.19_86, -7.09_03, -8.27_18] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": a = torch.tensor([-2.61_28, -8.00_80, -9.43_44] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": a = torch.tensor([-1.50_80, -7.45_34, -8.89_17] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": a = torch.tensor([-0.50_50, -6.58_33, -8.08_43] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": a = torch.tensor([-0.38_26, -7.03_36, -8.24_13] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": a = torch.tensor([-1.21_13, -6.91_01, -8.34_70] ) elif model_name == "ast-finetuned-speech-commands-v2": a = torch.tensor([6.15_89, -8.05_66, 
-8.79_84] ) else: raise ValueError("Unknown model name" ) if not torch.allclose(logits[0, :3], A, atol=1E-4 ): raise ValueError("Logits don't match" ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(A ).mkdir(exist_ok=A ) print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(A ) print(F"""Saving feature extractor to {pytorch_dump_folder_path}""" ) feature_extractor.save_pretrained(A ) if push_to_hub: print("Pushing model and feature extractor to the hub..." ) model.push_to_hub(F"""MIT/{model_name}""" ) feature_extractor.push_to_hub(F"""MIT/{model_name}""" ) if __name__ == "__main__": __lowerCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='ast-finetuned-audioset-10-10-0.4593', type=str, help='Name of the Audio Spectrogram Transformer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __lowerCAmelCase : Dict = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
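# Illustrative sketch (not part of the script above) of the fused-qkv split
# performed in convert_state_dict, in isolation; dim is a made-up hidden size.
import torch

dim = 768
qkv_weight = torch.randn(3 * dim, dim)  # fused projection as stored in the original checkpoint
q, k, v = qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)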
107
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __lowerCAmelCase : str = logging.get_logger(__name__) __lowerCAmelCase : Dict = {'vocab_file': 'spiece.model'} __lowerCAmelCase : Optional[int] = { 'vocab_file': { 'bert_for_seq_generation': ( 'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model' ), } } __lowerCAmelCase : Dict = {'bert_for_seq_generation': 512} class snake_case__ (_UpperCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ : List[int] = [] SCREAMING_SNAKE_CASE_ : Optional[Any] = ["""input_ids""", """attention_mask"""] def __init__( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str="<s>" , __lowerCamelCase : Optional[int]="</s>" , __lowerCamelCase : int="<unk>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : Union[str, Any]="<::::>" , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : List[str] , ) -> None: a = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , sep_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , ) a = vocab_file a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCamelCase ) @property def __UpperCAmelCase ( self : Dict ) -> Dict: return self.sp_model.get_piece_size() def __UpperCAmelCase ( self : Tuple ) -> Optional[int]: a = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ) -> Optional[Any]: a = self.__dict__.copy() a = None return state def __setstate__( self : Optional[Any] , __lowerCamelCase : Dict ) -> Optional[Any]: a = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): a = {} a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : str ) -> List[str]: return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase ) def __UpperCAmelCase ( self : str , __lowerCamelCase : Union[str, Any] ) -> int: return self.sp_model.piece_to_id(__lowerCamelCase ) def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : List[str] ) -> Any: a = self.sp_model.IdToPiece(__lowerCamelCase ) return token def __UpperCAmelCase ( self : Any , __lowerCamelCase : Dict ) -> Any: a = [] a = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCamelCase ) + token a = [] else: current_sub_tokens.append(__lowerCamelCase ) out_string += self.sp_model.decode(__lowerCamelCase ) return out_string.strip() def __UpperCAmelCase ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowerCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return a = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + 
VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCamelCase , "wb" ) as fi: a = self.sp_model.serialized_model_proto() fi.write(__lowerCamelCase ) return (out_vocab_file,)
107
1
def lucas_lehmer_test(p: int) -> bool:
    """Return True iff the Mersenne number 2**p - 1 is prime (p must be prime)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
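# Illustrative note (not part of the file above), assuming the function above
# is in scope: the test decides primality of the Mersenne number M_p = 2**p - 1
# via the recurrence s_0 = 4, s_{k+1} = s_k**2 - 2 (mod M_p); M_p is prime iff
# s_{p-2} == 0. Checking the prime exponents below 32:
print([p for p in (3, 5, 7, 11, 13, 17, 19, 23, 29, 31) if lucas_lehmer_test(p)])
# [3, 5, 7, 13, 17, 19, 31]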
284
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
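# Illustrative worked cases (not part of the file above), assuming the function
# above is in scope: one odd and one even combined length.
print(median_of_two_arrays([1, 3], [2]))     # 2 (the single middle element)
print(median_of_two_arrays([1, 2], [3, 4]))  # 2.5 (mean of the two middle elements)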
284
1
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __snake_case =logging.get_logger(__name__) def a_ ( lowerCamelCase : Any ): lowerCAmelCase = OrderedDict() for key, value in state_dict.items(): if key.startswith('module.encoder' ): lowerCAmelCase = key.replace('module.encoder' , 'glpn.encoder' ) if key.startswith('module.decoder' ): lowerCAmelCase = key.replace('module.decoder' , 'decoder.stages' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowerCAmelCase = key[key.find('patch_embed' ) + len('patch_embed' )] lowerCAmelCase = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(lowerCamelCase )-1}''' ) if "norm" in key: lowerCAmelCase = key.replace('norm' , 'layer_norm' ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowerCAmelCase = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )] lowerCAmelCase = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(lowerCamelCase )-1}''' ) if "layer_norm1" in key: lowerCAmelCase = key.replace('layer_norm1' , 'layer_norm_1' ) if "layer_norm2" in key: lowerCAmelCase = key.replace('layer_norm2' , 'layer_norm_2' ) if "block" in key: # replace for example block1 by block.0 lowerCAmelCase = key[key.find('block' ) + len('block' )] lowerCAmelCase = key.replace(f'''block{idx}''' , f'''block.{int(lowerCamelCase )-1}''' ) if "attn.q" in key: lowerCAmelCase = key.replace('attn.q' , 'attention.self.query' ) if "attn.proj" in key: lowerCAmelCase = key.replace('attn.proj' , 'attention.output.dense' ) if "attn" in key: lowerCAmelCase = key.replace('attn' , 'attention.self' ) if "fc1" in key: lowerCAmelCase = key.replace('fc1' , 'dense1' ) if "fc2" in key: lowerCAmelCase = key.replace('fc2' , 'dense2' ) if "linear_pred" in key: lowerCAmelCase = key.replace('linear_pred' , 'classifier' ) if "linear_fuse" in key: lowerCAmelCase = key.replace('linear_fuse.conv' , 'linear_fuse' ) lowerCAmelCase = key.replace('linear_fuse.bn' , 'batch_norm' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowerCAmelCase = key[key.find('linear_c' ) + len('linear_c' )] lowerCAmelCase = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(lowerCamelCase )-1}''' ) if "bot_conv" in key: lowerCAmelCase = key.replace('bot_conv' , '0.convolution' ) if "skip_conv1" in key: lowerCAmelCase = key.replace('skip_conv1' , '1.convolution' ) if "skip_conv2" in key: lowerCAmelCase = key.replace('skip_conv2' , '2.convolution' ) if "fusion1" in key: lowerCAmelCase = key.replace('fusion1' , '1.fusion' ) if "fusion2" in key: lowerCAmelCase = key.replace('fusion2' , '2.fusion' ) if "fusion3" in key: lowerCAmelCase = key.replace('fusion3' , '3.fusion' ) if "fusion" in key and "conv" in key: lowerCAmelCase = key.replace('conv' , 'convolutional_layer' ) if key.startswith('module.last_layer_depth' ): lowerCAmelCase = key.replace('module.last_layer_depth' , 'head.head' ) lowerCAmelCase = value return new_state_dict def a_ ( lowerCamelCase : List[str] , lowerCamelCase : str ): # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) lowerCAmelCase = 
state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' ) lowerCAmelCase = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict lowerCAmelCase = kv_weight[ : config.hidden_sizes[i], : ] lowerCAmelCase = kv_bias[: config.hidden_sizes[i]] lowerCAmelCase = kv_weight[ config.hidden_sizes[i] :, : ] lowerCAmelCase = kv_bias[config.hidden_sizes[i] :] def a_ ( ): lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowerCAmelCase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ) return image @torch.no_grad() def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any]=False , lowerCamelCase : List[str]=None ): lowerCAmelCase = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) lowerCAmelCase = GLPNImageProcessor() # prepare image lowerCAmelCase = prepare_img() lowerCAmelCase = image_processor(images=lowerCamelCase , return_tensors='pt' ).pixel_values logger.info('Converting model...' ) # load original state dict lowerCAmelCase = torch.load(lowerCamelCase , map_location=torch.device('cpu' ) ) # rename keys lowerCAmelCase = rename_keys(lowerCamelCase ) # key and value matrices need special treatment read_in_k_v(lowerCamelCase , lowerCamelCase ) # create HuggingFace model and load state dict lowerCAmelCase = GLPNForDepthEstimation(lowerCamelCase ) model.load_state_dict(lowerCamelCase ) model.eval() # forward pass lowerCAmelCase = model(lowerCamelCase ) lowerCAmelCase = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: lowerCAmelCase = torch.tensor( [[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] ) elif "kitti" in model_name: lowerCAmelCase = torch.tensor( [[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] ) else: raise ValueError(f'''Unknown model name: {model_name}''' ) lowerCAmelCase = torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , lowerCamelCase , atol=1e-4 ) print('Looks ok!' ) # finally, push to hub if required if push_to_hub: logger.info('Pushing model and image processor to the hub...' ) model.push_to_hub( repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase , ) image_processor.push_to_hub( repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase , ) if __name__ == "__main__": __snake_case =argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) parser.add_argument( """--model_name""", default="""glpn-kitti""", type=str, help="""Name of the model in case you're pushing to the hub.""", ) __snake_case =parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
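# Illustrative trace (not part of the script above): walking one checkpoint key
# through the replaces applied in rename_keys, re-created standalone.
key = "module.encoder.block1.0.attn.q.weight"
key = key.replace("module.encoder", "glpn.encoder")
idx = key[key.find("block") + len("block")]  # '1'
key = key.replace(f"block{idx}", f"block.{int(idx) - 1}")
key = key.replace("attn.q", "attention.self.query")
print(key)  # glpn.encoder.block.0.0.attention.self.query.weight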
4
'''simple docstring'''

INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
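# Hedged illustration (the str.replace application below is an assumption for
# demonstration, not code from the original config file): the placeholder map above
# can be applied to a docstring template one key at a time.
template = "Use {processor_class} together with {model_class}."
for placeholder, fake in {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
}.items():
    template = template.replace(placeholder, fake)
print(template)  # Use FakeProcessorClass together with FakeModelClass.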
55
0
"""simple docstring""" import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer UpperCAmelCase : Any = logging.get_logger(__name__) UpperCAmelCase : Dict = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} UpperCAmelCase : List[Any] = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } UpperCAmelCase : Union[str, Any] = { "allenai/led-base-16384": 1_6384, } class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = LEDTokenizer lowercase__ = ["input_ids", "attention_mask"] def __init__( self : Dict , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]="replace" , lowerCAmelCase_ : Dict="<s>" , lowerCAmelCase_ : Union[str, Any]="</s>" , lowerCAmelCase_ : List[Any]="</s>" , lowerCAmelCase_ : Optional[Any]="<s>" , lowerCAmelCase_ : Union[str, Any]="<unk>" , lowerCAmelCase_ : List[str]="<pad>" , lowerCAmelCase_ : Dict="<mask>" , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]=True , **lowerCAmelCase_ : Optional[Any] , ): """simple docstring""" super().__init__( lowerCAmelCase_ , lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ , **lowerCAmelCase_ , ) lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get("""add_prefix_space""" , lowerCAmelCase_) != add_prefix_space: lowercase_ = getattr(lowerCAmelCase_ , pre_tok_state.pop("""type""")) lowercase_ = add_prefix_space lowercase_ = pre_tok_class(**lowerCAmelCase_) lowercase_ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowercase_ = """post_processor""" lowercase_ = getattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_) if tokenizer_component_instance: lowercase_ = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase_ = tuple(state["""sep"""]) if "cls" in state: lowercase_ = tuple(state["""cls"""]) lowercase_ = False if state.get("""add_prefix_space""" , lowerCAmelCase_) != add_prefix_space: lowercase_ = add_prefix_space lowercase_ = True if state.get("""trim_offsets""" , lowerCAmelCase_) != trim_offsets: lowercase_ = trim_offsets lowercase_ = True if changes_to_apply: lowercase_ = getattr(lowerCAmelCase_ , state.pop("""type""")) lowercase_ = component_class(**lowerCAmelCase_) setattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_) @property # Copied from 
transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _UpperCAmelCase ( self : List[str]): """simple docstring""" if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""") return None return str(self._mask_token) @mask_token.setter def _UpperCAmelCase ( self : str , lowerCAmelCase_ : str): """simple docstring""" lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else value lowercase_ = value def _UpperCAmelCase ( self : Dict , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : List[Any]): """simple docstring""" lowercase_ = kwargs.get("""is_split_into_words""" , lowerCAmelCase_) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' """to use it with pretokenized inputs.""") return super()._batch_encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_) def _UpperCAmelCase ( self : Union[str, Any] , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Any): """simple docstring""" lowercase_ = kwargs.get("""is_split_into_words""" , lowerCAmelCase_) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' """to use it with pretokenized inputs.""") return super()._encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_) def _UpperCAmelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None): """simple docstring""" lowercase_ = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_) return tuple(lowerCAmelCase_) def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any]=None): """simple docstring""" lowercase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None): """simple docstring""" lowercase_ = [self.sep_token_id] lowercase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , ): """simple docstring""" lowercase_ = super()._pad( encoded_inputs=lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding_strategy=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , ) # Load from model defaults if return_attention_mask is None: lowercase_ = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowercase_ = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
lowercase_ = len(encoded_inputs["""global_attention_mask"""]) != len(lowerCAmelCase_) if needs_to_be_padded: lowercase_ = len(lowerCAmelCase_) - len(encoded_inputs["""global_attention_mask"""]) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowercase_ = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": lowercase_ = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side)) return encoded_inputs
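# Minimal sketch (inputs are made up) of the padding rule that the _pad override above
# applies to `global_attention_mask`: missing positions are filled with -1, which LED
# reads as "local attention", never with 0.
global_attention_mask = [1, 0, 0]   # hypothetical mask for a 3-token sequence
padded_input_ids = [5, 6, 7, 0, 0]  # hypothetical input ids padded to length 5
difference = len(padded_input_ids) - len(global_attention_mask)
padded_mask = global_attention_mask + [-1] * difference  # padding_side == "right"
print(padded_mask)  # [1, 0, 0, -1, -1]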
313
"""simple docstring""" import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer UpperCAmelCase : Any = logging.get_logger(__name__) UpperCAmelCase : Dict = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} UpperCAmelCase : List[Any] = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } UpperCAmelCase : Union[str, Any] = { "allenai/led-base-16384": 1_6384, } class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = LEDTokenizer lowercase__ = ["input_ids", "attention_mask"] def __init__( self : Dict , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]="replace" , lowerCAmelCase_ : Dict="<s>" , lowerCAmelCase_ : Union[str, Any]="</s>" , lowerCAmelCase_ : List[Any]="</s>" , lowerCAmelCase_ : Optional[Any]="<s>" , lowerCAmelCase_ : Union[str, Any]="<unk>" , lowerCAmelCase_ : List[str]="<pad>" , lowerCAmelCase_ : Dict="<mask>" , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]=True , **lowerCAmelCase_ : Optional[Any] , ): """simple docstring""" super().__init__( lowerCAmelCase_ , lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ , **lowerCAmelCase_ , ) lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get("""add_prefix_space""" , lowerCAmelCase_) != add_prefix_space: lowercase_ = getattr(lowerCAmelCase_ , pre_tok_state.pop("""type""")) lowercase_ = add_prefix_space lowercase_ = pre_tok_class(**lowerCAmelCase_) lowercase_ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowercase_ = """post_processor""" lowercase_ = getattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_) if tokenizer_component_instance: lowercase_ = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase_ = tuple(state["""sep"""]) if "cls" in state: lowercase_ = tuple(state["""cls"""]) lowercase_ = False if state.get("""add_prefix_space""" , lowerCAmelCase_) != add_prefix_space: lowercase_ = add_prefix_space lowercase_ = True if state.get("""trim_offsets""" , lowerCAmelCase_) != trim_offsets: lowercase_ = trim_offsets lowercase_ = True if changes_to_apply: lowercase_ = getattr(lowerCAmelCase_ , state.pop("""type""")) lowercase_ = component_class(**lowerCAmelCase_) setattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_) @property # Copied from 
transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _UpperCAmelCase ( self : List[str]): """simple docstring""" if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""") return None return str(self._mask_token) @mask_token.setter def _UpperCAmelCase ( self : str , lowerCAmelCase_ : str): """simple docstring""" lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else value lowercase_ = value def _UpperCAmelCase ( self : Dict , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : List[Any]): """simple docstring""" lowercase_ = kwargs.get("""is_split_into_words""" , lowerCAmelCase_) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' """to use it with pretokenized inputs.""") return super()._batch_encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_) def _UpperCAmelCase ( self : Union[str, Any] , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Any): """simple docstring""" lowercase_ = kwargs.get("""is_split_into_words""" , lowerCAmelCase_) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' """to use it with pretokenized inputs.""") return super()._encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_) def _UpperCAmelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None): """simple docstring""" lowercase_ = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_) return tuple(lowerCAmelCase_) def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any]=None): """simple docstring""" lowercase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None): """simple docstring""" lowercase_ = [self.sep_token_id] lowercase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , ): """simple docstring""" lowercase_ = super()._pad( encoded_inputs=lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding_strategy=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , ) # Load from model defaults if return_attention_mask is None: lowercase_ = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowercase_ = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
lowercase_ = len(encoded_inputs["""global_attention_mask"""]) != len(lowerCAmelCase_) if needs_to_be_padded: lowercase_ = len(lowerCAmelCase_) - len(encoded_inputs["""global_attention_mask"""]) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowercase_ = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": lowercase_ = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side)) return encoded_inputs
313
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available SCREAMING_SNAKE_CASE_ = { """configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""], """configuration_data2vec_text""": [ """DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecTextConfig""", """Data2VecTextOnnxConfig""", ], """configuration_data2vec_vision""": [ """DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecVisionConfig""", """Data2VecVisionOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ """DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecAudioForAudioFrameClassification""", """Data2VecAudioForCTC""", """Data2VecAudioForSequenceClassification""", """Data2VecAudioForXVector""", """Data2VecAudioModel""", """Data2VecAudioPreTrainedModel""", ] SCREAMING_SNAKE_CASE_ = [ """DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecTextForCausalLM""", """Data2VecTextForMaskedLM""", """Data2VecTextForMultipleChoice""", """Data2VecTextForQuestionAnswering""", """Data2VecTextForSequenceClassification""", """Data2VecTextForTokenClassification""", """Data2VecTextModel""", """Data2VecTextPreTrainedModel""", ] SCREAMING_SNAKE_CASE_ = [ """DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecVisionForImageClassification""", """Data2VecVisionForMaskedImageModeling""", """Data2VecVisionForSemanticSegmentation""", """Data2VecVisionModel""", """Data2VecVisionPreTrainedModel""", ] if is_tf_available(): SCREAMING_SNAKE_CASE_ = [ """TFData2VecVisionForImageClassification""", """TFData2VecVisionForSemanticSegmentation""", """TFData2VecVisionModel""", """TFData2VecVisionPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, 
module_spec=__spec__)
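# A small, self-contained sketch of the lazy-import idea behind _LazyModule above;
# TinyLazyModule and the stdlib mapping are illustrative stand-ins, not the real
# transformers implementation.
import importlib


class TinyLazyModule:
    def __init__(self, attr_to_module):
        self._attr_to_module = attr_to_module

    def __getattr__(self, name):
        # the backing module is imported only on first attribute access
        module = importlib.import_module(self._attr_to_module[name])
        return getattr(module, name)


lazy = TinyLazyModule({"OrderedDict": "collections", "sqrt": "math"})
print(lazy.sqrt(9.0))  # 3.0 -- "math" was imported only when sqrt was requested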
296
from pathlib import Path

import fire


def minify(src_path: str, dest_path: str, n: int):
    """Write the first n lines of each file in src_path to dest_path."""
    src_dir = Path(src_path)
    dest_dir = Path(dest_path)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_file = dest_dir.joinpath(path.name)
        print(dest_file)
        dest_file.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
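# Hypothetical command line for the minify script above (directory names and the line
# count are placeholders, not taken from the original file); python-fire maps the
# positional arguments onto minify(src_path, dest_path, n):
#
#   python minify_dataset.py ./wmt_en_ro ./wmt_en_ro_100 100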
296
1
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) def lowerCamelCase__ ( _lowercase , _lowercase=False ): '''simple docstring''' UpperCAmelCase_ : List[str] = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith('''head''' ): UpperCAmelCase_ : int = "segformer.encoder." + key if key.startswith('''backbone''' ): UpperCAmelCase_ : Any = key.replace('''backbone''' , '''segformer.encoder''' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 UpperCAmelCase_ : Union[str, Any] = key[key.find('''patch_embed''' ) + len('''patch_embed''' )] UpperCAmelCase_ : List[str] = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(_lowercase )-1}''' ) if "norm" in key: UpperCAmelCase_ : str = key.replace('''norm''' , '''layer_norm''' ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 UpperCAmelCase_ : str = key[key.find('''segformer.encoder.layer_norm''' ) + len('''segformer.encoder.layer_norm''' )] UpperCAmelCase_ : List[str] = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(_lowercase )-1}''' ) if "layer_norm1" in key: UpperCAmelCase_ : Tuple = key.replace('''layer_norm1''' , '''layer_norm_1''' ) if "layer_norm2" in key: UpperCAmelCase_ : Dict = key.replace('''layer_norm2''' , '''layer_norm_2''' ) if "block" in key: # replace for example block1 by block.0 UpperCAmelCase_ : Tuple = key[key.find('''block''' ) + len('''block''' )] UpperCAmelCase_ : Optional[int] = key.replace(f'''block{idx}''' , f'''block.{int(_lowercase )-1}''' ) if "attn.q" in key: UpperCAmelCase_ : Union[str, Any] = key.replace('''attn.q''' , '''attention.self.query''' ) if "attn.proj" in key: UpperCAmelCase_ : Optional[int] = key.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in key: UpperCAmelCase_ : Any = key.replace('''attn''' , '''attention.self''' ) if "fc1" in key: UpperCAmelCase_ : Dict = key.replace('''fc1''' , '''dense1''' ) if "fc2" in key: UpperCAmelCase_ : Dict = key.replace('''fc2''' , '''dense2''' ) if "linear_pred" in key: UpperCAmelCase_ : Optional[int] = key.replace('''linear_pred''' , '''classifier''' ) if "linear_fuse" in key: UpperCAmelCase_ : int = key.replace('''linear_fuse.conv''' , '''linear_fuse''' ) UpperCAmelCase_ : Any = key.replace('''linear_fuse.bn''' , '''batch_norm''' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 UpperCAmelCase_ : Tuple = key[key.find('''linear_c''' ) + len('''linear_c''' )] UpperCAmelCase_ : int = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(_lowercase )-1}''' ) if key.startswith('''head''' ): UpperCAmelCase_ : Optional[int] = key.replace('''head''' , '''classifier''' ) UpperCAmelCase_ : List[Any] = value return new_state_dict def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) UpperCAmelCase_ : Any = state_dict.pop(f'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' ) UpperCAmelCase_ : List[str] = 
state_dict.pop(f'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict UpperCAmelCase_ : Optional[Any] = kv_weight[ : config.hidden_sizes[i], : ] UpperCAmelCase_ : str = kv_bias[: config.hidden_sizes[i]] UpperCAmelCase_ : str = kv_weight[ config.hidden_sizes[i] :, : ] UpperCAmelCase_ : Tuple = kv_bias[ config.hidden_sizes[i] : ] def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : int = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ : Dict = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ) return image @torch.no_grad() def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = SegformerConfig() UpperCAmelCase_ : List[str] = False # set attributes based on model_name UpperCAmelCase_ : Any = "huggingface/label-files" if "segformer" in model_name: UpperCAmelCase_ : str = model_name[len('''segformer.''' ) : len('''segformer.''' ) + 2] if "ade" in model_name: UpperCAmelCase_ : str = 150 UpperCAmelCase_ : Optional[int] = "ade20k-id2label.json" UpperCAmelCase_ : str = (1, 150, 128, 128) elif "city" in model_name: UpperCAmelCase_ : int = 19 UpperCAmelCase_ : Any = "cityscapes-id2label.json" UpperCAmelCase_ : Tuple = (1, 19, 128, 128) else: raise ValueError(f'''Model {model_name} not supported''' ) elif "mit" in model_name: UpperCAmelCase_ : Tuple = True UpperCAmelCase_ : Union[str, Any] = model_name[4:6] UpperCAmelCase_ : Optional[int] = 1000 UpperCAmelCase_ : Dict = "imagenet-1k-id2label.json" UpperCAmelCase_ : Optional[Any] = (1, 1000) else: raise ValueError(f'''Model {model_name} not supported''' ) # set config attributes UpperCAmelCase_ : Any = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase_ : int = {int(_lowercase ): v for k, v in idalabel.items()} UpperCAmelCase_ : List[Any] = idalabel UpperCAmelCase_ : List[str] = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": UpperCAmelCase_ : Optional[Any] = [64, 128, 320, 512] UpperCAmelCase_ : str = 256 elif size == "b2": UpperCAmelCase_ : str = [64, 128, 320, 512] UpperCAmelCase_ : int = 768 UpperCAmelCase_ : List[Any] = [3, 4, 6, 3] elif size == "b3": UpperCAmelCase_ : Tuple = [64, 128, 320, 512] UpperCAmelCase_ : Any = 768 UpperCAmelCase_ : List[Any] = [3, 4, 18, 3] elif size == "b4": UpperCAmelCase_ : str = [64, 128, 320, 512] UpperCAmelCase_ : Union[str, Any] = 768 UpperCAmelCase_ : str = [3, 8, 27, 3] elif size == "b5": UpperCAmelCase_ : Tuple = [64, 128, 320, 512] UpperCAmelCase_ : Optional[int] = 768 UpperCAmelCase_ : Any = [3, 6, 40, 3] else: raise ValueError(f'''Size {size} not supported''' ) # load image processor (only resize + normalize) UpperCAmelCase_ : Union[str, Any] = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=_lowercase , align=_lowercase , do_random_crop=_lowercase ) # prepare image UpperCAmelCase_ : List[str] = prepare_img() UpperCAmelCase_ : Dict = image_processor(images=_lowercase , return_tensors='''pt''' ).pixel_values logger.info(f'''Converting model {model_name}...''' ) # load original state dict if encoder_only: UpperCAmelCase_ : Optional[int] = torch.load(_lowercase , map_location=torch.device('''cpu''' ) ) else: UpperCAmelCase_ : Any = torch.load(_lowercase , map_location=torch.device('''cpu''' ) )["state_dict"] # rename keys UpperCAmelCase_ : List[Any] = rename_keys(_lowercase , encoder_only=_lowercase ) if not encoder_only: del 
state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(_lowercase , _lowercase ) # create HuggingFace model and load state dict if encoder_only: UpperCAmelCase_ : List[Any] = False UpperCAmelCase_ : Optional[int] = SegformerForImageClassification(_lowercase ) else: UpperCAmelCase_ : List[str] = SegformerForSemanticSegmentation(_lowercase ) model.load_state_dict(_lowercase ) model.eval() # forward pass UpperCAmelCase_ : Optional[int] = model(_lowercase ) UpperCAmelCase_ : List[Any] = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": UpperCAmelCase_ : List[Any] = torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": UpperCAmelCase_ : Any = torch.tensor( [ [[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]], [[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]], [[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": UpperCAmelCase_ : List[str] = torch.tensor( [ [[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]], [[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]], [[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": UpperCAmelCase_ : Tuple = torch.tensor( [ [[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]], [[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]], [[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": UpperCAmelCase_ : Dict = torch.tensor( [ [[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]], [[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]], [[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": UpperCAmelCase_ : Tuple = torch.tensor( [ [[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]], [[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]], [[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": UpperCAmelCase_ : Optional[int] = torch.tensor( [ [[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]], [[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]], [[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": UpperCAmelCase_ : List[Any] = torch.tensor( [ [[-7.8217, -9.8767, -10.1717], [-9.4438, 
-10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]], [[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]], [[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": UpperCAmelCase_ : Optional[Any] = torch.tensor( [ [ [-1.1_3_7_2E0_1, -1.2_7_8_7E0_1, -1.3_4_7_7E0_1], [-1.2_5_3_6E0_1, -1.4_1_9_4E0_1, -1.4_4_0_9E0_1], [-1.3_2_1_7E0_1, -1.4_8_8_8E0_1, -1.5_3_2_7E0_1], ], [ [-1.4_7_9_1E0_1, -1.7_1_2_2E0_1, -1.8_2_7_7E0_1], [-1.7_1_6_3E0_1, -1.9_1_9_2E0_1, -1.9_5_3_3E0_1], [-1.7_8_9_7E0_1, -1.9_9_9_1E0_1, -2.0_3_1_5E0_1], ], [ [7.6_7_2_3E-0_1, 4.1_9_2_1E-0_1, -7.7_8_7_8E-0_2], [4.7_7_7_2E-0_1, 9.5_5_5_7E-0_3, -2.8_0_8_2E-0_1], [3.6_0_3_2E-0_1, -2.4_8_2_6E-0_1, -5.1_1_6_8E-0_1], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": UpperCAmelCase_ : Union[str, Any] = torch.tensor( [ [[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]], [[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]], [[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": UpperCAmelCase_ : Union[str, Any] = torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": UpperCAmelCase_ : Dict = torch.tensor( [ [[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]], [[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]], [[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": UpperCAmelCase_ : Optional[Any] = torch.tensor( [ [[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]], [[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]], [[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]], ] ) elif model_name == "segformer.b4.1024x1024.city.160k": UpperCAmelCase_ : List[Any] = torch.tensor( [ [[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]], [[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]], [[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": UpperCAmelCase_ : Dict = torch.tensor( [ [[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]], [[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]], [[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]], ] ) else: UpperCAmelCase_ : Optional[int] = logits.argmax(-1 ).item() print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , _lowercase , atol=1E-2 ) # finally, save model and image processor logger.info(f'''Saving PyTorch model and image processor to 
{pytorch_dump_folder_path}...''' ) Path(_lowercase ).mkdir(exist_ok=_lowercase ) model.save_pretrained(_lowercase ) image_processor.save_pretrained(_lowercase ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument( '--model_name', default='segformer.b0.512x512.ade.160k', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) __a = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
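# Self-contained sketch (toy tensors, not real checkpoint weights) of the key/value
# split performed by the read-in helper above: the fused [2 * hidden, hidden] kv
# projection is cut into separate key and value matrices along dim 0.
import torch

hidden = 4                                   # hypothetical hidden size
kv_weight = torch.randn(2 * hidden, hidden)  # fused kv projection from the checkpoint
key_weight = kv_weight[:hidden, :]           # first half -> key projection
value_weight = kv_weight[hidden:, :]         # second half -> value projection
assert key_weight.shape == value_weight.shape == (hidden, hidden)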
363
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __a = logging.get_logger(__name__) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if isinstance(_lowercase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(_lowercase , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(_lowercase ): return [[videos]] raise ValueError(f'''Could not make batched video from {videos}''' ) class __a( _a ): """simple docstring""" lowerCAmelCase = ['''pixel_values'''] def __init__( self ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = 1 / 255 ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> None: super().__init__(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = size if size is not None else {'''shortest_edge''': 224} UpperCAmelCase_ : Any = get_size_dict(_SCREAMING_SNAKE_CASE ,default_to_square=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} UpperCAmelCase_ : List[str] = get_size_dict(_SCREAMING_SNAKE_CASE ,param_name='''crop_size''' ) UpperCAmelCase_ : str = do_resize UpperCAmelCase_ : Union[str, Any] = size UpperCAmelCase_ : int = do_center_crop UpperCAmelCase_ : List[str] = crop_size UpperCAmelCase_ : Optional[int] = resample UpperCAmelCase_ : List[Any] = do_rescale UpperCAmelCase_ : Tuple = rescale_factor UpperCAmelCase_ : Optional[Any] = do_normalize UpperCAmelCase_ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase_ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> np.ndarray: UpperCAmelCase_ : Optional[int] = get_size_dict(_SCREAMING_SNAKE_CASE ,default_to_square=_SCREAMING_SNAKE_CASE ) if "shortest_edge" in size: UpperCAmelCase_ : Dict = get_resize_output_image_size(_SCREAMING_SNAKE_CASE ,size['''shortest_edge'''] ,default_to_square=_SCREAMING_SNAKE_CASE ) elif "height" in size and "width" in size: UpperCAmelCase_ : Tuple = (size['''height'''], size['''width''']) else: raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}''' ) return resize(_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ,resample=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> np.ndarray: UpperCAmelCase_ : str = get_size_dict(_SCREAMING_SNAKE_CASE ) if "height" not in size or "width" not in size: raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' ) return center_crop(_SCREAMING_SNAKE_CASE ,size=(size['''height'''], size['''width''']) ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> Dict: return rescale(_SCREAMING_SNAKE_CASE ,scale=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> np.ndarray: return normalize(_SCREAMING_SNAKE_CASE ,mean=_SCREAMING_SNAKE_CASE ,std=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = ChannelDimension.FIRST ,) -> np.ndarray: if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
UpperCAmelCase_ : Any = to_numpy_array(_SCREAMING_SNAKE_CASE ) if do_resize: UpperCAmelCase_ : Union[str, Any] = self.resize(image=_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ,resample=_SCREAMING_SNAKE_CASE ) if do_center_crop: UpperCAmelCase_ : Optional[int] = self.center_crop(_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ) if do_rescale: UpperCAmelCase_ : str = self.rescale(image=_SCREAMING_SNAKE_CASE ,scale=_SCREAMING_SNAKE_CASE ) if do_normalize: UpperCAmelCase_ : List[Any] = self.normalize(image=_SCREAMING_SNAKE_CASE ,mean=_SCREAMING_SNAKE_CASE ,std=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = to_channel_dimension_format(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) return image def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = ChannelDimension.FIRST ,**_SCREAMING_SNAKE_CASE ,) -> PIL.Image.Image: UpperCAmelCase_ : Dict = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ : int = resample if resample is not None else self.resample UpperCAmelCase_ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase_ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ : Tuple = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ : Optional[int] = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ : Optional[int] = image_std if image_std is not None else self.image_std UpperCAmelCase_ : List[str] = size if size is not None else self.size UpperCAmelCase_ : Optional[int] = get_size_dict(_SCREAMING_SNAKE_CASE ,default_to_square=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[str] = crop_size if crop_size is not None else self.crop_size UpperCAmelCase_ : Any = get_size_dict(_SCREAMING_SNAKE_CASE ,param_name='''crop_size''' ) if not valid_images(_SCREAMING_SNAKE_CASE ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) UpperCAmelCase_ : List[Any] = make_batched(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = [ [ self._preprocess_image( image=_SCREAMING_SNAKE_CASE ,do_resize=_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ,resample=_SCREAMING_SNAKE_CASE ,do_center_crop=_SCREAMING_SNAKE_CASE ,crop_size=_SCREAMING_SNAKE_CASE ,do_rescale=_SCREAMING_SNAKE_CASE ,rescale_factor=_SCREAMING_SNAKE_CASE ,do_normalize=_SCREAMING_SNAKE_CASE ,image_mean=_SCREAMING_SNAKE_CASE ,image_std=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,) for img in video ] for video in videos ] UpperCAmelCase_ : Any = {'''pixel_values''': videos} return BatchFeature(data=_SCREAMING_SNAKE_CASE ,tensor_type=_SCREAMING_SNAKE_CASE )
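# Minimal sketch (plain Python objects standing in for PIL images / arrays) of the
# batching rule make_batched implements above: a single frame becomes [[frame]], a
# single video becomes [video], and a batch of videos passes through unchanged.
import numpy as np


def toy_make_batched(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)):
        return videos       # already a batch of videos
    if isinstance(videos, (list, tuple)):
        return [videos]     # single video -> batch of one
    return [[videos]]       # single frame -> one-frame video in a batch of one


frame = np.zeros((2, 2, 3))
print(len(toy_make_batched(frame)), len(toy_make_batched([frame, frame])))  # 1 1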
235
0
import argparse
from copy import deepcopy

import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    set_seed,
)


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy


def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()


if __name__ == "__main__":
    main()
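# Quick self-contained check (toy label names, not the codecomplex classes) of the
# ClassLabel round trip that tokenize() above relies on: str2int maps a class name
# to its integer id.
from datasets import ClassLabel

toy_labels = ClassLabel(num_classes=3, names=["constant", "linear", "quadratic"])
assert toy_labels.str2int("linear") == 1
assert toy_labels.int2str(2) == "quadratic"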
302
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : torch.FloatTensor class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ ): @register_to_config def __init__( self : Dict , __lowercase : int = 32 , __lowercase : int = 64 , __lowercase : int = 20 , __lowercase : int = 768 , __lowercase : Any=77 , __lowercase : Optional[int]=4 , __lowercase : float = 0.0 , __lowercase : str = "silu" , __lowercase : Optional[str] = None , __lowercase : Optional[str] = None , __lowercase : Optional[str] = "linear" , __lowercase : Optional[str] = "prd" , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , ): '''simple docstring''' super().__init__() __a = num_attention_heads __a = attention_head_dim __a = num_attention_heads * attention_head_dim __a = additional_embeddings __a = time_embed_dim or inner_dim __a = embedding_proj_dim or embedding_dim __a = clip_embed_dim or embedding_dim __a = Timesteps(__lowercase , __lowercase , 0 ) __a = TimestepEmbedding(__lowercase , __lowercase , out_dim=__lowercase , act_fn=__lowercase ) __a = nn.Linear(__lowercase , __lowercase ) if embedding_proj_norm_type is None: __a = None elif embedding_proj_norm_type == "layer": __a = nn.LayerNorm(__lowercase ) else: raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" ) __a = nn.Linear(__lowercase , __lowercase ) if encoder_hid_proj_type is None: __a = None elif encoder_hid_proj_type == "linear": __a = nn.Linear(__lowercase , __lowercase ) else: raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" ) __a = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , __lowercase ) ) if added_emb_type == "prd": __a = nn.Parameter(torch.zeros(1 , 1 , __lowercase ) ) elif added_emb_type is None: __a = None else: raise ValueError( F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." ) __a = nn.ModuleList( [ BasicTransformerBlock( __lowercase , __lowercase , __lowercase , dropout=__lowercase , activation_fn="""gelu""" , attention_bias=__lowercase , ) for d in range(__lowercase ) ] ) if norm_in_type == "layer": __a = nn.LayerNorm(__lowercase ) elif norm_in_type is None: __a = None else: raise ValueError(F"Unsupported norm_in_type: {norm_in_type}." ) __a = nn.LayerNorm(__lowercase ) __a = nn.Linear(__lowercase , __lowercase ) __a = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 ) causal_attention_mask.triu_(1 ) __a = causal_attention_mask[None, ...] 
self.register_buffer("""causal_attention_mask""" , __lowercase , persistent=__lowercase ) __a = nn.Parameter(torch.zeros(1 , __lowercase ) ) __a = nn.Parameter(torch.zeros(1 , __lowercase ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' __a = {} def fn_recursive_add_processors(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict[str, AttentionProcessor] ): if hasattr(__lowercase , """set_processor""" ): __a = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"{name}.{sub_name}" , __lowercase , __lowercase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(__lowercase , __lowercase , __lowercase ) return processors def UpperCamelCase_ ( self : List[str] , __lowercase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ): '''simple docstring''' __a = len(self.attn_processors.keys() ) if isinstance(__lowercase , __lowercase ) and len(__lowercase ) != count: raise ValueError( F"A dict of processors was passed, but the number of processors {len(__lowercase )} does not match the" F" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict ): if hasattr(__lowercase , """set_processor""" ): if not isinstance(__lowercase , __lowercase ): module.set_processor(__lowercase ) else: module.set_processor(processor.pop(F"{name}.processor" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"{name}.{sub_name}" , __lowercase , __lowercase ) for name, module in self.named_children(): fn_recursive_attn_processor(__lowercase , __lowercase , __lowercase ) def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' self.set_attn_processor(AttnProcessor() ) def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Union[torch.Tensor, float, int] , __lowercase : torch.FloatTensor , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : Optional[torch.BoolTensor] = None , __lowercase : bool = True , ): '''simple docstring''' __a = hidden_states.shape[0] __a = timestep if not torch.is_tensor(__lowercase ): __a = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(__lowercase ) and len(timesteps.shape ) == 0: __a = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __a = timesteps * torch.ones(__lowercase , dtype=timesteps.dtype , device=timesteps.device ) __a = self.time_proj(__lowercase ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
__a = timesteps_projected.to(dtype=self.dtype ) __a = self.time_embedding(__lowercase ) if self.embedding_proj_norm is not None: __a = self.embedding_proj_norm(__lowercase ) __a = self.embedding_proj(__lowercase ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: __a = self.encoder_hidden_states_proj(__lowercase ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" ) __a = self.proj_in(__lowercase ) __a = self.positional_embedding.to(hidden_states.dtype ) __a = [] __a = 0 if encoder_hidden_states is not None: additional_embeds.append(__lowercase ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: __a = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: __a = hidden_states[:, None, :] __a = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: __a = self.prd_embedding.to(hidden_states.dtype ).expand(__lowercase , -1 , -1 ) additional_embeds.append(__lowercase ) __a = torch.cat( __lowercase , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens __a = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: __a = F.pad( __lowercase , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) __a = hidden_states + positional_embeddings if attention_mask is not None: __a = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0 __a = F.pad(__lowercase , (0, self.additional_embeddings) , value=0.0 ) __a = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) __a = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: __a = self.norm_in(__lowercase ) for block in self.transformer_blocks: __a = block(__lowercase , attention_mask=__lowercase ) __a = self.norm_out(__lowercase ) if self.prd_embedding is not None: __a = hidden_states[:, -1] else: __a = hidden_states[:, additional_embeddings_len:] __a = self.proj_to_clip_embeddings(__lowercase ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=__lowercase ) def UpperCamelCase_ ( self : Any , __lowercase : Tuple ): '''simple docstring''' __a = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
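# Stand-alone sketch (toy size) of the causal attention mask built in __init__ above:
# a matrix filled with -10000.0 whose strict upper triangle is kept, so every position
# can attend only to itself and earlier positions once the mask is added to the logits.
import torch

n = 4  # hypothetical total number of embeddings
mask = torch.full([n, n], -10000.0)
mask.triu_(1)   # zero on and below the diagonal, -10000 strictly above it
print(mask[0])  # tensor([     0., -10000., -10000., -10000.])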
302
1
"""simple docstring""" from pathlib import Path import cva import numpy as np from matplotlib import pyplot as plt def lowerCAmelCase__ ( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int , _UpperCamelCase : int ) -> np.ndarray: """simple docstring""" snake_case = cva.getAffineTransform(_UpperCamelCase , _UpperCamelCase ) return cva.warpAffine(_UpperCamelCase , _UpperCamelCase , (rows, cols) ) if __name__ == "__main__": # read original image SCREAMING_SNAKE_CASE__ = cva.imread( str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg") ) # turn image in gray scale value SCREAMING_SNAKE_CASE__ = cva.cvtColor(image, cva.COLOR_BGR2GRAY) # get image shape SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = gray_img.shape # set different points to rotate image SCREAMING_SNAKE_CASE__ = np.array([[50, 50], [200, 50], [50, 200]], np.floataa) SCREAMING_SNAKE_CASE__ = np.array([[10, 100], [200, 50], [100, 250]], np.floataa) SCREAMING_SNAKE_CASE__ = np.array([[50, 50], [150, 50], [120, 200]], np.floataa) SCREAMING_SNAKE_CASE__ = np.array([[10, 100], [80, 50], [180, 250]], np.floataa) # add all rotated images in a list SCREAMING_SNAKE_CASE__ = [ gray_img, get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), ] # plot different image rotations SCREAMING_SNAKE_CASE__ = plt.figure(1) SCREAMING_SNAKE_CASE__ = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"] for i, image in enumerate(images): plt.subplot(2, 2, i + 1), plt.imshow(image, "gray") plt.title(titles[i]) plt.axis("off") plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95) plt.show()
149
"""simple docstring""" import os def lowerCAmelCase__ ( _UpperCamelCase : str = "matrix.txt" ) -> int: """simple docstring""" with open(os.path.join(os.path.dirname(_UpperCamelCase ) , _UpperCamelCase ) ) as in_file: snake_case = in_file.read() snake_case = [[int(_UpperCamelCase ) for cell in row.split(',' )] for row in data.strip().splitlines()] snake_case = [[0 for cell in row] for row in grid] snake_case = len(grid[0] ) snake_case = [[0 for i in range(_UpperCamelCase )] for j in range(_UpperCamelCase )] snake_case = grid[0][0] for i in range(1 , _UpperCamelCase ): snake_case = grid[0][i] + dp[0][i - 1] for i in range(1 , _UpperCamelCase ): snake_case = grid[i][0] + dp[i - 1][0] for i in range(1 , _UpperCamelCase ): for j in range(1 , _UpperCamelCase ): snake_case = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] ) return dp[-1][-1] if __name__ == "__main__": print(f"""{solution() = }""")
149
1
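The minimal-path-sum recurrence is easiest to follow on a concrete grid; this is a hedged, self-contained sketch (the helper min_path_sum and the 2x2 values are illustrative, not from the source):

def min_path_sum(grid: list[list[int]]) -> int:
    # same dp recurrence as solution() above, but on an in-memory square grid
    n = len(grid)
    dp = [[0] * n for _ in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]


assert min_path_sum([[131, 673], [201, 96]]) == 131 + 201 + 96  # down, then right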
import argparse
import json
import os

import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile

from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
    rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int


def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-rename basic flax keys to their PyTorch equivalents."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor


def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    torch.save(new_current_block, save_path)


def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)

    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open the tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )


def sanity_check():
    """Load the converted model and generate once as a smoke test."""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
182
"""Maximum flow on a capacity matrix via the push-relabel (relabel-to-front) algorithm."""


class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make a fake vertex if there is more than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms; use a deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override this method
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for _ in range(self.vertices_count)]
        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to the graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i for i in range(self.vertices_count) if i != self.source_index and i != self.sink_index
        ]

        # move through the list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, move it to the front and restart from index 0
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's a neighbour and the current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set the algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
3
0
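As a hedged sanity check of the push-relabel executor, the multi-source graph left commented out in the file can be fed through the same API; by hand, node 2 is capped at 8 units of out-capacity and node 3 at 8 units of in-capacity, so the maximum flow should come out to 16:

multi_graph = [
    [0, 0, 4, 6, 0, 0],
    [0, 0, 5, 2, 0, 0],
    [0, 0, 0, 0, 4, 4],
    [0, 0, 0, 0, 6, 6],
    [0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0],
]
network = FlowNetwork(multi_graph, [0, 1], [4, 5])  # two sources, two sinks
network.set_maximum_flow_algorithm(PushRelabelExecutor)
assert network.find_maximum_flow() == 16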
"""simple docstring""" import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration lowerCamelCase__ = 500_000 lowerCamelCase__ , lowerCamelCase__ = os.path.split(__file__) lowerCamelCase__ = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json""")) @get_duration def __lowerCAmelCase (_UpperCamelCase , **_UpperCamelCase ): __lowerCAmelCase : Optional[int] = dataset.map(**_UpperCamelCase ) @get_duration def __lowerCAmelCase (_UpperCamelCase , **_UpperCamelCase ): __lowerCAmelCase : Optional[int] = dataset.filter(**_UpperCamelCase ) def __lowerCAmelCase (): __lowerCAmelCase : str = {'num examples': SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: __lowerCAmelCase : Optional[int] = datasets.Features({'text': datasets.Value('string' ), 'numbers': datasets.Value('float32' )} ) __lowerCAmelCase : str = generate_example_dataset( os.path.join(_UpperCamelCase , 'dataset.arrow' ) , _UpperCamelCase , num_examples=_UpperCamelCase ) __lowerCAmelCase : int = transformers.AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=_UpperCamelCase ) def tokenize(_UpperCamelCase ): return tokenizer(examples['text'] ) __lowerCAmelCase : List[str] = map(_UpperCamelCase ) __lowerCAmelCase : Union[str, Any] = map(_UpperCamelCase , batched=_UpperCamelCase ) __lowerCAmelCase : Dict = map(_UpperCamelCase , function=lambda _UpperCamelCase : None , batched=_UpperCamelCase ) with dataset.formatted_as(type='numpy' ): __lowerCAmelCase : Union[str, Any] = map(_UpperCamelCase , function=lambda _UpperCamelCase : None , batched=_UpperCamelCase ) with dataset.formatted_as(type='pandas' ): __lowerCAmelCase : Dict = map(_UpperCamelCase , function=lambda _UpperCamelCase : None , batched=_UpperCamelCase ) with dataset.formatted_as(type='torch' , columns='numbers' ): __lowerCAmelCase : Dict = map(_UpperCamelCase , function=lambda _UpperCamelCase : None , batched=_UpperCamelCase ) with dataset.formatted_as(type='tensorflow' , columns='numbers' ): __lowerCAmelCase : Optional[int] = map(_UpperCamelCase , function=lambda _UpperCamelCase : None , batched=_UpperCamelCase ) __lowerCAmelCase : List[str] = map(_UpperCamelCase , function=_UpperCamelCase , batched=_UpperCamelCase ) __lowerCAmelCase : Optional[int] = filter(_UpperCamelCase ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(_UpperCamelCase , 'wb' ) as f: f.write(json.dumps(_UpperCamelCase ).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
365
"""simple docstring""" def __lowerCAmelCase (_UpperCamelCase ): if len(_UpperCamelCase ) <= 1: return lst __lowerCAmelCase : str = 1 while i < len(_UpperCamelCase ): if lst[i - 1] <= lst[i]: i += 1 else: __lowerCAmelCase , __lowerCAmelCase : List[Any] = lst[i], lst[i - 1] i -= 1 if i == 0: __lowerCAmelCase : int = 1 return lst if __name__ == "__main__": lowerCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip() lowerCamelCase__ = [int(item) for item in user_input.split(""",""")] print(gnome_sort(unsorted))
182
0
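A quick, illustrative property check for the gnome sort above: it should agree with Python's built-in sorted() on random inputs.

import random

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    assert gnome_sort(list(data)) == sorted(data)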
"""simple docstring""" import re def lowercase__ ( _UpperCAmelCase ) -> str: '''simple docstring''' if len(re.findall('[ATCG]' , _UpperCAmelCase ) ) != len(_UpperCAmelCase ): raise ValueError('Invalid Strand' ) return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) ) if __name__ == "__main__": import doctest doctest.testmod()
255
"""simple docstring""" def lowercase__ ( _UpperCAmelCase ) -> int: '''simple docstring''' if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise TypeError('Input value must be an \'int\' type' ) lowercase : str = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
255
1
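For non-negative integers the bit-position helper above computes exactly Python's int.bit_length(), which makes a cheap, illustrative cross-check:

for value in [0, 1, 2, 3, 255, 256, 2**31]:
    assert get_highest_set_bit_position(value) == value.bit_length()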
'''simple docstring''' import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase__ ( a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = None lowerCAmelCase__ = BloomTokenizerFast lowerCAmelCase__ = BloomTokenizerFast lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = "tokenizer_file" lowerCAmelCase__ = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"} def UpperCAmelCase__ ( self : Union[str, Any] ) -> str: """simple docstring""" super().setUp() __SCREAMING_SNAKE_CASE = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase__ ( self : List[Any] , **__SCREAMING_SNAKE_CASE : Tuple ) -> int: """simple docstring""" kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[int] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""] __SCREAMING_SNAKE_CASE = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]] __SCREAMING_SNAKE_CASE = tokenizer.batch_encode_plus(__SCREAMING_SNAKE_CASE )["""input_ids"""] self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple=6 ) -> Dict: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input __SCREAMING_SNAKE_CASE = """This is a simple input""" __SCREAMING_SNAKE_CASE = ["""This is a simple input 1""", """This is a simple input 2"""] __SCREAMING_SNAKE_CASE = ("""This is a simple input""", """This is a pair""") __SCREAMING_SNAKE_CASE = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests try: tokenizer_r.encode(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE ) tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE ) tokenizer_r.batch_encode_plus(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE ) tokenizer_r.encode(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE ) tokenizer_r.batch_encode_plus(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE ) except ValueError: self.fail("""Bloom Tokenizer should be able to deal with padding""" ) __SCREAMING_SNAKE_CASE = None # Hotfixing padding = None self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="""max_length""" ) # Simple input self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="""max_length""" ) # Simple input self.assertRaises( __SCREAMING_SNAKE_CASE , 
tokenizer_r.batch_encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="""max_length""" , ) # Pair input self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="""max_length""" ) # Pair input self.assertRaises(__SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="""max_length""" ) # Pair input self.assertRaises( __SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="""max_length""" , ) def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE = load_dataset("""xnli""" , """all_languages""" , split="""test""" , streaming=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = next(iter(__SCREAMING_SNAKE_CASE ) )["""premise"""] # pick up one data __SCREAMING_SNAKE_CASE = list(sample_data.values() ) __SCREAMING_SNAKE_CASE = list(map(tokenizer.encode , __SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE = [tokenizer.decode(__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE ) for x in output_tokens] self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Dict ) -> Optional[int]: """simple docstring""" self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
331
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCAmelCase__ ( a , a , a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = AltDiffusionPipeline lowerCAmelCase__ = TEXT_TO_IMAGE_PARAMS lowerCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) __SCREAMING_SNAKE_CASE = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) # TODO: address the non-deterministic text encoder (fails for save-load tests) # torch.manual_seed(0) # text_encoder_config = RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_002 , ) __SCREAMING_SNAKE_CASE = CLIPTextModel(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" ) __SCREAMING_SNAKE_CASE = 77 __SCREAMING_SNAKE_CASE = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict=0 ) -> List[str]: """simple docstring""" if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ): __SCREAMING_SNAKE_CASE = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, 
"""num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def UpperCAmelCase__ ( self : Any ) -> Tuple: """simple docstring""" super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def UpperCAmelCase__ ( self : Tuple ) -> str: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE = self.get_dummy_components() torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , ) # TODO: remove after fixing the non-deterministic text encoder __SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = text_encoder __SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE ) alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = """A photo of an astronaut""" __SCREAMING_SNAKE_CASE = alt_pipe(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __SCREAMING_SNAKE_CASE = np.array( [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE = self.get_dummy_components() __SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , ) # TODO: remove after fixing the non-deterministic text encoder __SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = text_encoder __SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE ) alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = alt_pipe(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __SCREAMING_SNAKE_CASE = np.array( [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Any ) -> Optional[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=__SCREAMING_SNAKE_CASE 
) __SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE ) alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger""" __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = alt_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=20 , output_type="""np""" ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase__ ( self : List[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""" ) __SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE ) alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger""" __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = alt_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""numpy""" ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
331
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
137
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the checkpoint weights into the transformers structure."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
137
1
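The key-renaming loop in the conversion script above can be exercised on a toy state dict (string values stand in for tensors; purely illustrative):

toy = {
    "roberta.encoder.layer.0.attention.self.query.weight": "q",
    "roberta.encoder.layer.0.attention.self.LayerNorm.weight": "unused",
    "lm_head.bias": "b",
}
converted = {}
for key, value in toy.items():
    if key.startswith("roberta."):
        key = "roberta_prelayernorm." + key[len("roberta.") :]
    if key.endswith(".self.LayerNorm.weight") or key.endswith(".self.LayerNorm.bias"):
        continue  # unused weights are dropped
    converted[key] = value
assert "roberta_prelayernorm.encoder.layer.0.attention.self.query.weight" in converted
assert "lm_head.bias" in converted and len(converted) == 2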
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def SCREAMING_SNAKE_CASE_ ( snake_case : Tuple )-> Optional[Any]: for param in module.parameters(): _lowerCamelCase = False def SCREAMING_SNAKE_CASE_ ( )-> Tuple: _lowerCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu' if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _lowerCamelCase = 'mps' if device == "mps": print( 'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch' ' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues' ' with generations.' ) return device def SCREAMING_SNAKE_CASE_ ( snake_case : str )-> str: _lowerCamelCase = plt.imshow(snake_case ) fig.axes.get_xaxis().set_visible(snake_case ) fig.axes.get_yaxis().set_visible(snake_case ) plt.show() def SCREAMING_SNAKE_CASE_ ( )-> Any: _lowerCamelCase = datetime.now() _lowerCamelCase = current_time.strftime('%H:%M:%S' ) return timestamp
366
"""simple docstring""" import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __a ( unittest.TestCase ): def snake_case_ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def snake_case_ ( self ): _lowerCamelCase , _lowerCamelCase = FlaxControlNetModel.from_pretrained( 'lllyasviel/sd-controlnet-canny' , from_pt=a__ , dtype=jnp.bfloataa ) _lowerCamelCase , _lowerCamelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , controlnet=a__ , from_pt=a__ , dtype=jnp.bfloataa ) _lowerCamelCase = controlnet_params _lowerCamelCase = 'bird' _lowerCamelCase = jax.device_count() _lowerCamelCase = pipe.prepare_text_inputs([prompts] * num_samples ) _lowerCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ) _lowerCamelCase = pipe.prepare_image_inputs([canny_image] * num_samples ) _lowerCamelCase = jax.random.PRNGKey(0 ) _lowerCamelCase = jax.random.split(a__ , jax.device_count() ) _lowerCamelCase = replicate(a__ ) _lowerCamelCase = shard(a__ ) _lowerCamelCase = shard(a__ ) _lowerCamelCase = pipe( prompt_ids=a__ , image=a__ , params=a__ , prng_seed=a__ , num_inference_steps=50 , jit=a__ , ).images assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3) _lowerCamelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) _lowerCamelCase = images[0, 2_53:2_56, 2_53:2_56, -1] _lowerCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) ) _lowerCamelCase = jnp.array( [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] ) print(F'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def snake_case_ ( self ): _lowerCamelCase , _lowerCamelCase = FlaxControlNetModel.from_pretrained( 'lllyasviel/sd-controlnet-openpose' , from_pt=a__ , dtype=jnp.bfloataa ) _lowerCamelCase , _lowerCamelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , controlnet=a__ , from_pt=a__ , dtype=jnp.bfloataa ) _lowerCamelCase = controlnet_params _lowerCamelCase = 'Chef in the kitchen' _lowerCamelCase = jax.device_count() _lowerCamelCase = pipe.prepare_text_inputs([prompts] * num_samples ) _lowerCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' ) _lowerCamelCase = pipe.prepare_image_inputs([pose_image] * num_samples ) _lowerCamelCase = jax.random.PRNGKey(0 ) _lowerCamelCase = jax.random.split(a__ , jax.device_count() ) _lowerCamelCase = replicate(a__ ) _lowerCamelCase = shard(a__ ) _lowerCamelCase = shard(a__ ) _lowerCamelCase = pipe( prompt_ids=a__ , image=a__ , params=a__ , prng_seed=a__ , num_inference_steps=50 , jit=a__ , ).images assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3) _lowerCamelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) _lowerCamelCase = images[0, 2_53:2_56, 2_53:2_56, -1] _lowerCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) ) _lowerCamelCase = jnp.array( [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 
0.302734]] ) print(F'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
80
0
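Illustrative usage of the freeze/device/timestamp helpers defined earlier in this row (names follow the fixed-up version above):

import torch

layer = torch.nn.Linear(4, 4)
freeze_module(layer)
assert all(not p.requires_grad for p in layer.parameters())

device = get_device()
print(f"[{get_timestamp()}] running on {device}")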
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class a__ ( snake_case ): """simple docstring""" __lowerCamelCase = ['image_processor', 'tokenizer'] __lowerCamelCase = 'Pix2StructImageProcessor' __lowerCamelCase = ('T5Tokenizer', 'T5TokenizerFast') def __init__( self , lowercase , lowercase ) -> Optional[int]: '''simple docstring''' A__ = False super().__init__(lowercase , lowercase ) def __call__( self , lowercase=None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 2048 , lowercase = 0 , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ) -> BatchEncoding: '''simple docstring''' if images is None and text is None: raise ValueError("You have to specify either images or text." ) # Get only text if images is None and not self.image_processor.is_vqa: A__ = self.tokenizer A__ = self.tokenizer( text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , ) return text_encoding if not self.image_processor.is_vqa: # add pixel_values A__ = self.image_processor( lowercase , return_tensors=lowercase , max_patches=lowercase , **lowercase ) else: # add pixel_values and bbox A__ = self.image_processor( lowercase , return_tensors=lowercase , max_patches=lowercase , header_text=lowercase , **lowercase ) if text is not None and not self.image_processor.is_vqa: A__ = self.tokenizer( text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , ) if "attention_mask" in text_encoding: A__ = text_encoding.pop("attention_mask" ) if "input_ids" in text_encoding: A__ = text_encoding.pop("input_ids" ) else: A__ = None if text_encoding is not None: encoding_image_processor.update(lowercase ) return encoding_image_processor def UpperCamelCase ( self , *lowercase , **lowercase ) -> Tuple: '''simple docstring''' return self.tokenizer.batch_decode(*lowercase , **lowercase ) def UpperCamelCase ( self , *lowercase , **lowercase ) -> Union[str, Any]: '''simple docstring''' return self.tokenizer.decode(*lowercase , **lowercase ) @property def UpperCamelCase ( self ) -> Dict: '''simple docstring''' A__ = self.tokenizer.model_input_names A__ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
68
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Union[str, Any] = KandinskyVaaImgaImgPipeline _lowercase : Tuple = ['''image_embeds''', '''negative_image_embeds''', '''image'''] _lowercase : Any = [ '''image_embeds''', '''negative_image_embeds''', '''image''', ] _lowercase : Union[str, Any] = [ '''generator''', '''height''', '''width''', '''strength''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] _lowercase : Optional[Any] = False @property def lowerCamelCase_ ( self: Union[str, Any] ) -> Dict: """simple docstring""" return 32 @property def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]: """simple docstring""" return 32 @property def lowerCamelCase_ ( self: Any ) -> Any: """simple docstring""" return self.time_input_dim @property def lowerCamelCase_ ( self: Tuple ) -> Any: """simple docstring""" return self.time_input_dim * 4 @property def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]: """simple docstring""" return 100 @property def lowerCamelCase_ ( self: int ) -> int: """simple docstring""" torch.manual_seed(0 ) lowercase__ = { '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } lowercase__ = UNetaDConditionModel(**UpperCamelCase_ ) return model @property def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]: """simple docstring""" return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowerCamelCase_ ( self: Optional[Any] ) -> int: """simple docstring""" torch.manual_seed(0 ) lowercase__ = VQModel(**self.dummy_movq_kwargs ) return model def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]: """simple docstring""" lowercase__ = self.dummy_unet lowercase__ = self.dummy_movq lowercase__ = { '''num_train_timesteps''': 1_000, '''beta_schedule''': '''linear''', '''beta_start''': 0.00085, '''beta_end''': 0.012, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', 
'''thresholding''': False, } lowercase__ = DDIMScheduler(**UpperCamelCase_ ) lowercase__ = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int]=0 ) -> Optional[int]: """simple docstring""" lowercase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ ) lowercase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( UpperCamelCase_ ) # create init_image lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ ) lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase__ = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert('''RGB''' ).resize((256, 256) ) if str(UpperCamelCase_ ).startswith('''mps''' ): lowercase__ = torch.manual_seed(UpperCamelCase_ ) else: lowercase__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) lowercase__ = { '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def lowerCamelCase_ ( self: Optional[int] ) -> Dict: """simple docstring""" lowercase__ = '''cpu''' lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**UpperCamelCase_ ) lowercase__ = pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) lowercase__ = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) ) lowercase__ = output.images lowercase__ = pipe( **self.get_dummy_inputs(UpperCamelCase_ ) , return_dict=UpperCamelCase_ , )[0] lowercase__ = image[0, -3:, -3:, -1] lowercase__ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__ = np.array( [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' @slow @require_torch_gpu class _a ( unittest.TestCase ): def lowerCamelCase_ ( self: str ) -> List[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]: """simple docstring""" lowercase__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_img2img_frog.npy''' ) lowercase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) lowercase__ = '''A red cartoon frog, 4k''' lowercase__ = KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(UpperCamelCase_ ) lowercase__ = KandinskyVaaImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa ) lowercase__ = pipeline.to(UpperCamelCase_ ) pipeline.set_progress_bar_config(disable=UpperCamelCase_ ) lowercase__ = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowercase__ , lowercase__ = pipe_prior( UpperCamelCase_ , 
generator=UpperCamelCase_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() lowercase__ = pipeline( image=UpperCamelCase_ , image_embeds=UpperCamelCase_ , negative_image_embeds=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , ) lowercase__ = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
110
0
"""simple docstring""" import math from collections.abc import Iterator from itertools import takewhile def __UpperCAmelCase ( lowercase ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 ,int(math.sqrt(lowercase ) + 1 ) ,6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = 2 while True: if is_prime(lowercase ): yield num num += 1 def __UpperCAmelCase ( lowercase = 2_00_00_00 ): """simple docstring""" return sum(takewhile(lambda lowercase : x < n ,prime_generator() ) ) if __name__ == "__main__": print(F'''{solution() = }''')
30
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = {} _UpperCAmelCase = tokenizer(example["""content"""] ,truncation=lowercase )["""input_ids"""] _UpperCAmelCase = len(example["""content"""] ) / len(output["""input_ids"""] ) return output UpperCAmelCase__ = HfArgumentParser(PretokenizationArguments) UpperCAmelCase__ = parser.parse_args() if args.num_workers is None: UpperCAmelCase__ = multiprocessing.cpu_count() UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir) UpperCAmelCase__ = time.time() UpperCAmelCase__ = load_dataset(args.dataset_name, split="""train""") print(F'''Dataset loaded in {time.time()-t_start:.2f}s''') UpperCAmelCase__ = time.time() UpperCAmelCase__ = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ """repo_name""", """path""", """copies""", """size""", """content""", """license""", """hash""", """line_mean""", """line_max""", """alpha_frac""", """autogenerated""", ], ) print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''') UpperCAmelCase__ = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
30
1
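A hedged cross-check of the generator-based solution against a small sieve (the primes_below helper is illustrative, not from the source):

def primes_below(n: int) -> list[int]:
    # simple sieve of Eratosthenes, assumes n >= 2
    sieve = [True] * n
    sieve[0:2] = [False, False]
    for p in range(2, int(n**0.5) + 1):
        if sieve[p]:
            sieve[p * p :: p] = [False] * len(sieve[p * p :: p])
    return [i for i, is_p in enumerate(sieve) if is_p]


assert solution(10) == sum(primes_below(10)) == 17  # 2 + 3 + 5 + 7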
'''simple docstring''' import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a_ : def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=3_2 , snake_case_=3 , snake_case_=4 , snake_case_=[1_0, 2_0, 3_0, 4_0] , snake_case_=[2, 2, 3, 2] , snake_case_=True , snake_case_=True , snake_case_=3_7 , snake_case_="gelu" , snake_case_=1_0 , snake_case_=0.02 , snake_case_=["stage2", "stage3", "stage4"] , snake_case_=[2, 3, 4] , snake_case_=None , ): _lowerCAmelCase : Dict = parent _lowerCAmelCase : int = batch_size _lowerCAmelCase : int = image_size _lowerCAmelCase : str = num_channels _lowerCAmelCase : Union[str, Any] = num_stages _lowerCAmelCase : List[str] = hidden_sizes _lowerCAmelCase : Tuple = depths _lowerCAmelCase : Optional[int] = is_training _lowerCAmelCase : Tuple = use_labels _lowerCAmelCase : Dict = intermediate_size _lowerCAmelCase : Optional[Any] = hidden_act _lowerCAmelCase : Tuple = num_labels _lowerCAmelCase : List[Any] = initializer_range _lowerCAmelCase : Any = out_features _lowerCAmelCase : str = out_indices _lowerCAmelCase : List[str] = scope def __UpperCamelCase ( self ): _lowerCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase : List[Any] = None if self.use_labels: _lowerCAmelCase : Tuple = ids_tensor([self.batch_size] , self.num_labels ) _lowerCAmelCase : Optional[int] = self.get_config() return config, pixel_values, labels def __UpperCamelCase ( self ): return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ): _lowerCAmelCase : Dict = ConvNextVaModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _lowerCAmelCase : Any = model(lowerCAmelCase__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ): _lowerCAmelCase : Optional[Any] = ConvNextVaForImageClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _lowerCAmelCase : List[str] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ): 
_lowerCAmelCase : str = ConvNextVaBackbone(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _lowerCAmelCase : Optional[int] = model(lowerCAmelCase__ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None _lowerCAmelCase : Any = None _lowerCAmelCase : Dict = ConvNextVaBackbone(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() _lowerCAmelCase : List[str] = model(lowerCAmelCase__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def __UpperCamelCase ( self ): _lowerCAmelCase : Any = self.prepare_config_and_inputs() _lowerCAmelCase : Any = config_and_inputs _lowerCAmelCase : Tuple = {"""pixel_values""": pixel_values} return config, inputs_dict def __UpperCamelCase ( self ): _lowerCAmelCase : int = self.prepare_config_and_inputs() _lowerCAmelCase : str = config_and_inputs _lowerCAmelCase : Dict = {"""pixel_values""": pixel_values, """labels""": labels} return config, inputs_dict @require_torch class a_ (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase : Union[str, Any] = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) __lowerCAmelCase : List[str] = ( {'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification} if is_torch_available() else {} ) __lowerCAmelCase : Optional[int] = False __lowerCAmelCase : List[str] = False __lowerCAmelCase : Tuple = False __lowerCAmelCase : Optional[int] = False __lowerCAmelCase : Tuple = False def __UpperCamelCase ( self ): _lowerCAmelCase : Optional[Any] = ConvNextVaModelTester(self ) _lowerCAmelCase : Any = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=3_7 ) def __UpperCamelCase ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __UpperCamelCase ( self ): return @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" ) def __UpperCamelCase ( self ): pass @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" ) def __UpperCamelCase ( self ): pass @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" ) def __UpperCamelCase ( self ): pass def __UpperCamelCase ( self ): if not self.model_tester.is_training: return for model_class in self.all_model_classes: _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels() _lowerCAmelCase : Any = True if model_class.__name__ in [ *get_values(lowerCAmelCase__ ), *get_values(lowerCAmelCase__ ), ]: continue 
_lowerCAmelCase : Union[str, Any] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.train() _lowerCAmelCase : Optional[Any] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ ) _lowerCAmelCase : Dict = model(**lowerCAmelCase__ ).loss loss.backward() def __UpperCamelCase ( self ): if not self.model_tester.is_training: return for model_class in self.all_model_classes: _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_with_labels() _lowerCAmelCase : Optional[Any] = False _lowerCAmelCase : Optional[Any] = True if ( model_class.__name__ in [*get_values(lowerCAmelCase__ ), *get_values(lowerCAmelCase__ )] or not model_class.supports_gradient_checkpointing ): continue _lowerCAmelCase : int = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.gradient_checkpointing_enable() model.train() _lowerCAmelCase : str = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ ) _lowerCAmelCase : Dict = model(**lowerCAmelCase__ ).loss loss.backward() def __UpperCamelCase ( self ): _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : List[str] = model_class(lowerCAmelCase__ ) _lowerCAmelCase : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase : int = [*signature.parameters.keys()] _lowerCAmelCase : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCAmelCase__ ) def __UpperCamelCase ( self ): _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def __UpperCamelCase ( self ): def check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ): _lowerCAmelCase : Optional[int] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): _lowerCAmelCase : Dict = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) _lowerCAmelCase : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _lowerCAmelCase : Tuple = self.model_tester.num_stages self.assertEqual(len(lowerCAmelCase__ ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase : Tuple = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase : Any = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def __UpperCamelCase ( self ): _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ ) @slow def __UpperCamelCase ( self ): for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase : Optional[int] = ConvNextVaModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def _UpperCAmelCase ( ) -> str: _lowerCAmelCase : List[str] = 
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class a_ (unittest.TestCase ): @cached_property def __UpperCamelCase ( self ): return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None @slow def __UpperCamelCase ( self ): _lowerCAmelCase : Optional[Any] = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(lowerCAmelCase__ ) _lowerCAmelCase : List[str] = self.default_image_processor _lowerCAmelCase : List[Any] = prepare_img() _lowerCAmelCase : str = preprocessor(images=lowerCAmelCase__ , return_tensors="""pt""" ).to(lowerCAmelCase__ ) # forward pass with torch.no_grad(): _lowerCAmelCase : List[Any] = model(**lowerCAmelCase__ ) # verify the logits _lowerCAmelCase : Any = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase__ ) _lowerCAmelCase : str = torch.tensor([0.9996, 0.1966, -0.4386] ).to(lowerCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
309
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class snake_case__ : def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=10 , lowerCAmelCase__=3 , lowerCAmelCase__=2 , lowerCAmelCase__=2 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=10 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__="divided_space_time" , lowerCAmelCase__=None , ) -> List[str]: __magic_name__ : int = parent __magic_name__ : Tuple = batch_size __magic_name__ : int = image_size __magic_name__ : str = num_channels __magic_name__ : Dict = patch_size __magic_name__ : Tuple = num_frames __magic_name__ : List[Any] = is_training __magic_name__ : List[Any] = use_labels __magic_name__ : Dict = hidden_size __magic_name__ : List[Any] = num_hidden_layers __magic_name__ : str = num_attention_heads __magic_name__ : List[Any] = intermediate_size __magic_name__ : Dict = hidden_act __magic_name__ : List[Any] = hidden_dropout_prob __magic_name__ : Union[str, Any] = attention_probs_dropout_prob __magic_name__ : Tuple = attention_type __magic_name__ : List[str] = initializer_range __magic_name__ : Optional[Any] = scope __magic_name__ : Tuple = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __magic_name__ : str = (image_size // patch_size) ** 2 __magic_name__ : Any = (num_frames) * self.num_patches_per_frame + 1 def __magic_name__ ( self ) -> Dict: __magic_name__ : Optional[Any] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) __magic_name__ : str = None if self.use_labels: __magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) __magic_name__ : Optional[Any] = self.get_config() return config, pixel_values, labels def __magic_name__ ( self ) -> str: __magic_name__ : Dict = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __magic_name__ : Optional[Any] = self.num_labels return config def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> 
Optional[Any]: __magic_name__ : List[Any] = TimesformerModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() __magic_name__ : Optional[Any] = model(lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any: __magic_name__ : int = TimesformerForVideoClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() __magic_name__ : List[Any] = model(lowerCAmelCase__ ) # verify the logits shape __magic_name__ : List[Any] = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , lowerCAmelCase__ ) def __magic_name__ ( self ) -> Any: __magic_name__ : Union[str, Any] = self.prepare_config_and_inputs() __magic_name__ ,__magic_name__ ,__magic_name__ : Tuple = config_and_inputs __magic_name__ : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): lowercase__ : Tuple = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowercase__ : Union[str, Any] = ( {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification} if is_torch_available() else {} ) lowercase__ : int = False lowercase__ : str = False lowercase__ : Tuple = False lowercase__ : Any = False def __magic_name__ ( self ) -> List[Any]: __magic_name__ : List[Any] = TimesformerModelTester(self ) __magic_name__ : List[str] = ConfigTester( self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 ) def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> List[str]: __magic_name__ : List[str] = copy.deepcopy(lowerCAmelCase__ ) if return_labels: if model_class in get_values(lowerCAmelCase__ ): __magic_name__ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ ) return inputs_dict def __magic_name__ ( self ) -> List[str]: self.config_tester.run_common_tests() @unittest.skip(reason="""TimeSformer does not use inputs_embeds""" ) def __magic_name__ ( self ) -> str: pass def __magic_name__ ( self ) -> Optional[int]: __magic_name__ ,__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ : List[Any] = model_class(lowerCAmelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __magic_name__ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) ) def __magic_name__ ( self ) -> Optional[Any]: __magic_name__ ,__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ : Dict = model_class(lowerCAmelCase__ ) __magic_name__ : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __magic_name__ : Optional[int] = [*signature.parameters.keys()] __magic_name__ : Any = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCAmelCase__ ) def __magic_name__ ( self ) -> List[Any]: __magic_name__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def __magic_name__ ( self ) -> Union[str, Any]: __magic_name__ : Dict = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*lowerCAmelCase__ ) @slow def __magic_name__ ( self ) -> Optional[int]: for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ : List[str] = TimesformerModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def __magic_name__ ( self ) -> List[Any]: if not self.has_attentions: pass else: __magic_name__ ,__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ : Optional[int] = True for model_class in self.all_model_classes: __magic_name__ : Tuple = self.model_tester.seq_length __magic_name__ : int = self.model_tester.num_frames __magic_name__ : Any = True __magic_name__ : Tuple = False __magic_name__ : Optional[int] = True __magic_name__ : str = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): __magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) __magic_name__ : List[str] = outputs.attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __magic_name__ : Optional[Any] = True __magic_name__ : Optional[Any] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): __magic_name__ : Optional[int] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) __magic_name__ : int = outputs.attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __magic_name__ : Union[str, Any] = len(lowerCAmelCase__ ) # Check attention is always last and order is fine __magic_name__ : str = True __magic_name__ : Optional[Any] = True __magic_name__ : int = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): __magic_name__ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) self.assertEqual(out_len + 1 , len(lowerCAmelCase__ ) ) __magic_name__ : Union[str, Any] = outputs.attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def __magic_name__ ( self ) -> Any: def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): __magic_name__ : Union[str, Any] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): __magic_name__ : int = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) __magic_name__ : Optional[Any] = outputs.hidden_states __magic_name__ : str = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) __magic_name__ : str = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __magic_name__ ,__magic_name__ : int = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ : Optional[Any] = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __magic_name__ : Union[str, Any] = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def UpperCamelCase ( ): """simple docstring""" __magic_name__ : List[Any] = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""", filename="""eating_spaghetti.npy""", repo_type="""dataset""" ) __magic_name__ : List[str] = np.load(_A ) return list(_A ) @require_torch @require_vision class snake_case__ ( unittest.TestCase ): @cached_property def __magic_name__ ( self ) -> Optional[Any]: # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def __magic_name__ ( self ) -> List[Any]: __magic_name__ : Dict = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to( lowerCAmelCase__ ) __magic_name__ : str = self.default_image_processor __magic_name__ : Any = prepare_video() __magic_name__ : Dict = image_processor(video[:8] , return_tensors="""pt""" ).to(lowerCAmelCase__ ) # forward pass with torch.no_grad(): __magic_name__ : int = model(**lowerCAmelCase__ ) # verify the logits __magic_name__ : Optional[int] = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase__ ) __magic_name__ : Union[str, Any] = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(lowerCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
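# The attention- and hidden-state-shape checks above all lean on one piece of
# token arithmetic: TimeSformer splits each frame into (image_size // patch_size)**2
# patches and prepends a single CLS token across all frames. A small sketch:
def timesformer_seq_length(image_size: int, patch_size: int, num_frames: int) -> int:
    num_patches_per_frame = (image_size // patch_size) ** 2
    return num_frames * num_patches_per_frame + 1

# With the tester defaults (image_size=10, patch_size=2, num_frames=2):
assert timesformer_seq_length(10, 2, 2) == 2 * 25 + 1 == 51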
342
0
from __future__ import annotations import math def a__ ( __UpperCamelCase , __UpperCamelCase ): if len(__UpperCamelCase ) != 2 or len(a[0] ) != 2 or len(__UpperCamelCase ) != 2 or len(b[0] ) != 2: raise Exception("Matrices are not 2x2" ) SCREAMING_SNAKE_CASE_ = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def a__ ( __UpperCamelCase , __UpperCamelCase ): return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(__UpperCamelCase ) ) ] def a__ ( __UpperCamelCase , __UpperCamelCase ): return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(__UpperCamelCase ) ) ] def a__ ( __UpperCamelCase ): if len(__UpperCamelCase ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception("Odd matrices are not supported!" ) SCREAMING_SNAKE_CASE_ = len(__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = matrix_length // 2 SCREAMING_SNAKE_CASE_ = [[a[i][j] for j in range(__UpperCamelCase , __UpperCamelCase )] for i in range(__UpperCamelCase )] SCREAMING_SNAKE_CASE_ = [ [a[i][j] for j in range(__UpperCamelCase , __UpperCamelCase )] for i in range(__UpperCamelCase , __UpperCamelCase ) ] SCREAMING_SNAKE_CASE_ = [[a[i][j] for j in range(__UpperCamelCase )] for i in range(__UpperCamelCase )] SCREAMING_SNAKE_CASE_ = [[a[i][j] for j in range(__UpperCamelCase )] for i in range(__UpperCamelCase , __UpperCamelCase )] return top_left, top_right, bot_left, bot_right def a__ ( __UpperCamelCase ): return len(__UpperCamelCase ), len(matrix[0] ) def a__ ( __UpperCamelCase ): print("\n".join(str(__UpperCamelCase ) for line in matrix ) ) def a__ ( __UpperCamelCase , __UpperCamelCase ): if matrix_dimensions(__UpperCamelCase ) == (2, 2): return default_matrix_multiplication(__UpperCamelCase , __UpperCamelCase ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = split_matrix(__UpperCamelCase ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = split_matrix(__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = actual_strassen(__UpperCamelCase , matrix_subtraction(__UpperCamelCase , __UpperCamelCase ) ) SCREAMING_SNAKE_CASE_ = actual_strassen(matrix_addition(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ) SCREAMING_SNAKE_CASE_ = actual_strassen(matrix_addition(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ) SCREAMING_SNAKE_CASE_ = actual_strassen(__UpperCamelCase , matrix_subtraction(__UpperCamelCase , __UpperCamelCase ) ) SCREAMING_SNAKE_CASE_ = actual_strassen(matrix_addition(__UpperCamelCase , __UpperCamelCase ) , matrix_addition(__UpperCamelCase , __UpperCamelCase ) ) SCREAMING_SNAKE_CASE_ = actual_strassen(matrix_subtraction(__UpperCamelCase , __UpperCamelCase ) , matrix_addition(__UpperCamelCase , __UpperCamelCase ) ) SCREAMING_SNAKE_CASE_ = actual_strassen(matrix_subtraction(__UpperCamelCase , __UpperCamelCase ) , matrix_addition(__UpperCamelCase , __UpperCamelCase ) ) SCREAMING_SNAKE_CASE_ = matrix_addition(matrix_subtraction(matrix_addition(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ) , __UpperCamelCase ) SCREAMING_SNAKE_CASE_ = matrix_addition(__UpperCamelCase , __UpperCamelCase ) SCREAMING_SNAKE_CASE_ = matrix_addition(__UpperCamelCase , __UpperCamelCase ) SCREAMING_SNAKE_CASE_ = matrix_subtraction(matrix_subtraction(matrix_addition(__UpperCamelCase , __UpperCamelCase ) , 
__UpperCamelCase ) , __UpperCamelCase ) # construct the new matrix from our 4 quadrants SCREAMING_SNAKE_CASE_ = [] for i in range(len(__UpperCamelCase ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(__UpperCamelCase ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def a__ ( __UpperCamelCase , __UpperCamelCase ): if matrix_dimensions(__UpperCamelCase )[1] != matrix_dimensions(__UpperCamelCase )[0]: SCREAMING_SNAKE_CASE_ = ( "Unable to multiply these matrices, please check the dimensions.\n" F'''Matrix A: {matrixa}\n''' F'''Matrix B: {matrixa}''' ) raise Exception(__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = matrix_dimensions(__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = matrix_dimensions(__UpperCamelCase ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] SCREAMING_SNAKE_CASE_ = max(*__UpperCamelCase , *__UpperCamelCase ) SCREAMING_SNAKE_CASE_ = int(math.pow(2 , math.ceil(math.loga(__UpperCamelCase ) ) ) ) SCREAMING_SNAKE_CASE_ = matrixa SCREAMING_SNAKE_CASE_ = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , __UpperCamelCase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , __UpperCamelCase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , __UpperCamelCase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) SCREAMING_SNAKE_CASE_ = actual_strassen(__UpperCamelCase , __UpperCamelCase ) # Removing the additional zeros for i in range(0 , __UpperCamelCase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , __UpperCamelCase ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": A : Any = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] A : Any = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]] print(strassen(matrixa, matrixa))
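# Strassen's trick in its smallest form: seven multiplications instead of eight
# for a 2x2 block product. A self-contained sketch (independent of the recursive
# routine above) of the recombination identities it implements.
def strassen_2x2(a, b):
    p1 = a[0][0] * (b[0][1] - b[1][1])
    p2 = (a[0][0] + a[0][1]) * b[1][1]
    p3 = (a[1][0] + a[1][1]) * b[0][0]
    p4 = a[1][1] * (b[1][0] - b[0][0])
    p5 = (a[0][0] + a[1][1]) * (b[0][0] + b[1][1])
    p6 = (a[0][1] - a[1][1]) * (b[1][0] + b[1][1])
    p7 = (a[0][0] - a[1][0]) * (b[0][0] + b[0][1])
    return [
        [p5 + p4 - p2 + p6, p1 + p2],
        [p3 + p4, p1 + p5 - p3 - p7],
    ]

assert strassen_2x2([[1, 2], [3, 4]], [[5, 6], [7, 8]]) == [[19, 22], [43, 50]]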
305
from collections import deque class lowerCamelCase : """simple docstring""" def __init__( self : str , __magic_name__ : str , __magic_name__ : int , __magic_name__ : int ) -> None: SCREAMING_SNAKE_CASE_ = process_name # process name SCREAMING_SNAKE_CASE_ = arrival_time # arrival time of the process # completion time of finished process or last interrupted time SCREAMING_SNAKE_CASE_ = arrival_time SCREAMING_SNAKE_CASE_ = burst_time # remaining burst time SCREAMING_SNAKE_CASE_ = 0 # total time of the process wait in ready queue SCREAMING_SNAKE_CASE_ = 0 # time from arrival time to completion time class lowerCamelCase : """simple docstring""" def __init__( self : Tuple , __magic_name__ : int , __magic_name__ : list[int] , __magic_name__ : deque[Process] , __magic_name__ : int , ) -> None: # total number of mlfq's queues SCREAMING_SNAKE_CASE_ = number_of_queues # time slice of queues that round robin algorithm applied SCREAMING_SNAKE_CASE_ = time_slices # unfinished process is in this ready_queue SCREAMING_SNAKE_CASE_ = queue # current time SCREAMING_SNAKE_CASE_ = current_time # finished process is in this sequence queue SCREAMING_SNAKE_CASE_ = deque() def __A ( self : Dict ) -> list[str]: SCREAMING_SNAKE_CASE_ = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def __A ( self : List[str] , __magic_name__ : list[Process] ) -> list[int]: SCREAMING_SNAKE_CASE_ = [] for i in range(len(__magic_name__ ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def __A ( self : List[str] , __magic_name__ : list[Process] ) -> list[int]: SCREAMING_SNAKE_CASE_ = [] for i in range(len(__magic_name__ ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def __A ( self : Tuple , __magic_name__ : list[Process] ) -> list[int]: SCREAMING_SNAKE_CASE_ = [] for i in range(len(__magic_name__ ) ): completion_times.append(queue[i].stop_time ) return completion_times def __A ( self : str , __magic_name__ : deque[Process] ) -> list[int]: return [q.burst_time for q in queue] def __A ( self : Optional[Any] , __magic_name__ : Process ) -> int: process.waiting_time += self.current_time - process.stop_time return process.waiting_time def __A ( self : Optional[Any] , __magic_name__ : deque[Process] ) -> deque[Process]: SCREAMING_SNAKE_CASE_ = deque() # sequence deque of finished process while len(__magic_name__ ) != 0: SCREAMING_SNAKE_CASE_ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(__magic_name__ ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 SCREAMING_SNAKE_CASE_ = 0 # set the process's turnaround time because it is finished SCREAMING_SNAKE_CASE_ = self.current_time - cp.arrival_time # set the completion time SCREAMING_SNAKE_CASE_ = self.current_time # add the process to queue that has finished queue finished.append(__magic_name__ ) self.finish_queue.extend(__magic_name__ ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def __A ( self : Any , __magic_name__ : deque[Process] , __magic_name__ : int ) -> tuple[deque[Process], deque[Process]]: SCREAMING_SNAKE_CASE_ = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in 
range(len(__magic_name__ ) ): SCREAMING_SNAKE_CASE_ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(__magic_name__ ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time SCREAMING_SNAKE_CASE_ = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(__magic_name__ ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished SCREAMING_SNAKE_CASE_ = 0 # set the finish time SCREAMING_SNAKE_CASE_ = self.current_time # update the process' turnaround time because it is finished SCREAMING_SNAKE_CASE_ = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(__magic_name__ ) self.finish_queue.extend(__magic_name__ ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def __A ( self : Any ) -> deque[Process]: # all queues except last one have round_robin algorithm for i in range(self.number_of_queues - 1 ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest A : Dict = Process("P1", 0, 53) A : str = Process("P2", 0, 17) A : List[Any] = Process("P3", 0, 68) A : List[str] = Process("P4", 0, 24) A : Dict = 3 A : Any = [17, 25] A : Dict = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])}) A : Union[str, Any] = Process("P1", 0, 53) A : Any = Process("P2", 0, 17) A : Dict = Process("P3", 0, 68) A : List[str] = Process("P4", 0, 24) A : Optional[int] = 3 A : int = [17, 25] A : Union[str, Any] = deque([Pa, Pa, Pa, Pa]) A : Tuple = MLFQ(number_of_queues, time_slices, queue, 0) A : Tuple = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( f"waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}" ) # print completion times of processes(P1, P2, P3, P4) print( f"completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}" ) # print total turnaround times of processes(P1, P2, P3, P4) print( f"turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}" ) # print sequence of finished processes print( f"sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}" )
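# A hand-checkable trace of the scheduler above, assuming the un-obfuscated
# Process/MLFQ class names its __main__ block already uses: two queues, a
# 2-tick round-robin slice on the first, then FCFS for whatever is left.
from collections import deque

pa = Process("A", 0, 5)  # needs 5 ticks of CPU
pb = Process("B", 0, 3)  # needs 3 ticks of CPU
scheduler = MLFQ(2, [2], deque([pa, pb]), 0)
finished = scheduler.multi_level_feedback_queue()

# Round robin: A runs t=0..2, B runs t=2..4; FCFS: A finishes at t=7, B at t=8.
print([p.process_name for p in finished])          # expected ['A', 'B']
print(scheduler.calculate_waiting_time([pa, pb]))  # expected [2, 5]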
305
1
"""simple docstring""" import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: snake_case_ = 1.5 snake_case_ = int(factor * num_class_images ) snake_case_ = ClipClient( url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=_SCREAMING_SNAKE_CASE , aesthetic_weight=0.1 ) os.makedirs(f"""{class_data_dir}/images""" , exist_ok=_SCREAMING_SNAKE_CASE ) if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images: return while True: snake_case_ = client.query(text=_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) >= factor * num_class_images or num_images > 1E4: break else: snake_case_ = int(factor * num_images ) snake_case_ = ClipClient( url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=_SCREAMING_SNAKE_CASE , aesthetic_weight=0.1 , ) snake_case_ = 0 snake_case_ = 0 snake_case_ = tqdm(desc="""downloading real regularization images""" , total=_SCREAMING_SNAKE_CASE ) with open(f"""{class_data_dir}/caption.txt""" , """w""" ) as fa, open(f"""{class_data_dir}/urls.txt""" , """w""" ) as fa, open( f"""{class_data_dir}/images.txt""" , """w""" ) as fa: while total < num_class_images: snake_case_ = class_images[count] count += 1 try: snake_case_ = requests.get(images["""url"""] ) if img.status_code == 200: snake_case_ = Image.open(BytesIO(img.content ) ) with open(f"""{class_data_dir}/images/{total}.jpg""" , """wb""" ) as f: f.write(img.content ) fa.write(images["""caption"""] + """\n""" ) fa.write(images["""url"""] + """\n""" ) fa.write(f"""{class_data_dir}/images/{total}.jpg""" + """\n""" ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def _a ( ) -> Any: snake_case_ = argparse.ArgumentParser("""""" , add_help=_SCREAMING_SNAKE_CASE ) parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE ) parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE ) parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=_SCREAMING_SNAKE_CASE ) return parser.parse_args() if __name__ == "__main__": __SCREAMING_SNAKE_CASE : str = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
347
"""simple docstring""" import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__) __SCREAMING_SNAKE_CASE : str = tf.data.AUTOTUNE def _a ( ) -> List[str]: snake_case_ = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" ) parser.add_argument( """--pretrained_model_config""" , type=_SCREAMING_SNAKE_CASE , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , ) parser.add_argument( """--tokenizer""" , type=_SCREAMING_SNAKE_CASE , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , ) parser.add_argument( """--per_replica_batch_size""" , type=_SCREAMING_SNAKE_CASE , default=8 , help="""Batch size per TPU core.""" , ) parser.add_argument( """--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , ) parser.add_argument( """--tpu_name""" , type=_SCREAMING_SNAKE_CASE , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , ) parser.add_argument( """--tpu_zone""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , ) parser.add_argument( """--gcp_project""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" ) parser.add_argument( """--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , ) parser.add_argument( """--train_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to training dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--shuffle_buffer_size""" , type=_SCREAMING_SNAKE_CASE , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , ) parser.add_argument( """--eval_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to evaluation dataset to load. If the path begins with `gs://`""" """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , ) parser.add_argument( """--num_epochs""" , type=_SCREAMING_SNAKE_CASE , default=1 , help="""Number of epochs to train for.""" , ) parser.add_argument( """--learning_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-4 , help="""Learning rate to use for training.""" , ) parser.add_argument( """--weight_decay_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-3 , help="""Weight decay rate to use for training.""" , ) parser.add_argument( """--max_length""" , type=_SCREAMING_SNAKE_CASE , default=512 , help="""Maximum length of tokenized sequences. 
Should match the setting used in prepare_tfrecord_shards.py""" , ) parser.add_argument( """--mlm_probability""" , type=_SCREAMING_SNAKE_CASE , default=0.15 , help="""Fraction of tokens to mask during training.""" , ) parser.add_argument("""--output_dir""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to save model checkpoints to.""" ) parser.add_argument("""--hub_model_id""" , type=_SCREAMING_SNAKE_CASE , help="""Model ID to upload to on the Hugging Face Hub.""" ) snake_case_ = parser.parse_args() return args def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: try: if args.tpu_name: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """ """--gcp_project. When running on a TPU VM, use --tpu_name local.""" ) tf.config.experimental_connect_to_cluster(_SCREAMING_SNAKE_CASE ) tf.tpu.experimental.initialize_tpu_system(_SCREAMING_SNAKE_CASE ) return tpu def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]: snake_case_ = 0 for file in file_list: snake_case_ = file.split("""/""" )[-1] snake_case_ = re.search(r"""-\d+-(\d+)\.tfrecord""" , _SCREAMING_SNAKE_CASE ).group(1 ) snake_case_ = int(_SCREAMING_SNAKE_CASE ) num_samples += sample_count return num_samples def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.data.Dataset.from_tensor_slices(_SCREAMING_SNAKE_CASE ) if shuffle: snake_case_ = dataset.shuffle(len(_SCREAMING_SNAKE_CASE ) ) snake_case_ = tf.data.TFRecordDataset(_SCREAMING_SNAKE_CASE , num_parallel_reads=_SCREAMING_SNAKE_CASE ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here snake_case_ = dataset.apply(tf.data.experimental.assert_cardinality(_SCREAMING_SNAKE_CASE ) ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) if shuffle: assert shuffle_buffer_size is not None snake_case_ = dataset.shuffle(args.shuffle_buffer_size ) snake_case_ = dataset.batch(_SCREAMING_SNAKE_CASE , drop_remainder=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE ) snake_case_ = dataset.prefetch(_SCREAMING_SNAKE_CASE ) return dataset def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]: if not args.no_tpu: snake_case_ = initialize_tpu(_SCREAMING_SNAKE_CASE ) snake_case_ = tf.distribute.TPUStrategy(_SCREAMING_SNAKE_CASE ) else: snake_case_ = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" ) snake_case_ = AutoTokenizer.from_pretrained(args.tokenizer ) snake_case_ = AutoConfig.from_pretrained(args.pretrained_model_config ) snake_case_ = tokenizer.vocab_size snake_case_ = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) ) if not training_records: raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" ) snake_case_ = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) ) if not eval_records: raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" ) snake_case_ = count_samples(_SCREAMING_SNAKE_CASE ) snake_case_ = 
num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) snake_case_ = steps_per_epoch * args.num_epochs with strategy.scope(): snake_case_ = TFAutoModelForMaskedLM.from_config(_SCREAMING_SNAKE_CASE ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built snake_case_ , snake_case_ = create_optimizer( num_train_steps=_SCREAMING_SNAKE_CASE , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=_SCREAMING_SNAKE_CASE , metrics=["""accuracy"""] ) def decode_fn(_SCREAMING_SNAKE_CASE ): snake_case_ = { """input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), """attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. snake_case_ = DataCollatorForLanguageModeling( tokenizer=_SCREAMING_SNAKE_CASE , mlm_probability=args.mlm_probability , mlm=_SCREAMING_SNAKE_CASE , return_tensors="""tf""" ) def mask_with_collator(_SCREAMING_SNAKE_CASE ): # TF really needs an isin() function snake_case_ = ( ~tf.cast(batch["""attention_mask"""] , tf.bool ) | (batch["""input_ids"""] == tokenizer.cls_token_id) | (batch["""input_ids"""] == tokenizer.sep_token_id) ) snake_case_ , snake_case_ = data_collator.tf_mask_tokens( batch["""input_ids"""] , vocab_size=len(_SCREAMING_SNAKE_CASE ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_SCREAMING_SNAKE_CASE , ) return batch snake_case_ = args.per_replica_batch_size * strategy.num_replicas_in_sync snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , shuffle_buffer_size=args.shuffle_buffer_size , ) snake_case_ = prepare_dataset( _SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , ) snake_case_ = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_SCREAMING_SNAKE_CASE ) ) model.fit( _SCREAMING_SNAKE_CASE , validation_data=_SCREAMING_SNAKE_CASE , epochs=args.num_epochs , callbacks=_SCREAMING_SNAKE_CASE , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Union[str, Any] = parse_args() main(args)
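# Sketch of the shard-naming convention count_samples() above depends on: each
# tfrecord produced by the companion prepare_tfrecord_shards.py step ends in
# "-<shard_index>-<num_samples>.tfrecord", so totals are recoverable from the
# filenames alone. The bucket paths below are hypothetical.
import re

shards = [
    "gs://my-bucket/wikitext-train-00000-5120.tfrecord",
    "gs://my-bucket/wikitext-train-00001-4873.tfrecord",
]
total = sum(
    int(re.search(r"-\d+-(\d+)\.tfrecord", path.split("/")[-1]).group(1))
    for path in shards
)
print(total)  # 9993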
347
1
'''simple docstring''' import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class a__ ( lowerCamelCase_ ): def __init__( self , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=None , **_UpperCamelCase ): """simple docstring""" _lowercase : Dict = parent _lowercase : int = config_class _lowercase : Optional[int] = has_text_modality _lowercase : str = kwargs _lowercase : List[Any] = common_properties def _lowerCamelCase ( self ): """simple docstring""" _lowercase : int = self.config_class(**self.inputs_dict ) _lowercase : Optional[Any] = ( ["hidden_size", "num_attention_heads", "num_hidden_layers"] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(["vocab_size"] ) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(_UpperCamelCase , _UpperCamelCase ) , msg=f'''`{prop}` does not exist''' ) # Test that config has the common properties as setter for idx, name in enumerate(_UpperCamelCase ): try: setattr(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) self.parent.assertEqual( getattr(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase , msg=f'''`{name} value {idx} expected, but was {getattr(_UpperCamelCase , _UpperCamelCase )}''' ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) for idx, name in enumerate(_UpperCamelCase ): try: _lowercase : List[Any] = self.config_class(**{name: idx} ) self.parent.assertEqual( getattr(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase , msg=f'''`{name} value {idx} expected, but was {getattr(_UpperCamelCase , _UpperCamelCase )}''' ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def _lowerCamelCase ( self ): """simple docstring""" _lowercase : Tuple = self.config_class(**self.inputs_dict ) _lowercase : Tuple = json.loads(config.to_json_string() ) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key] , _UpperCamelCase ) def _lowerCamelCase ( self ): """simple docstring""" _lowercase : int = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _lowercase : Optional[Any] = os.path.join(_UpperCamelCase , "config.json" ) config_first.to_json_file(_UpperCamelCase ) _lowercase : List[Any] = self.config_class.from_json_file(_UpperCamelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def _lowerCamelCase ( self ): """simple docstring""" _lowercase : int = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(_UpperCamelCase ) _lowercase : Optional[int] = self.config_class.from_pretrained(_UpperCamelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def _lowerCamelCase ( self ): """simple docstring""" _lowercase : List[str] = self.config_class(**self.inputs_dict ) _lowercase : Dict = "test" with tempfile.TemporaryDirectory() as tmpdirname: _lowercase : str = os.path.join(_UpperCamelCase , _UpperCamelCase ) config_first.save_pretrained(_UpperCamelCase ) _lowercase : Dict = 
self.config_class.from_pretrained(_UpperCamelCase , subfolder=_UpperCamelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def _lowerCamelCase ( self ): """simple docstring""" _lowercase : Any = self.config_class(**self.inputs_dict , num_labels=5 ) self.parent.assertEqual(len(config.idalabel ) , 5 ) self.parent.assertEqual(len(config.labelaid ) , 5 ) _lowercase : Tuple = 3 self.parent.assertEqual(len(config.idalabel ) , 3 ) self.parent.assertEqual(len(config.labelaid ) , 3 ) def _lowerCamelCase ( self ): """simple docstring""" if self.config_class.is_composition: return _lowercase : Tuple = self.config_class() self.parent.assertIsNotNone(_UpperCamelCase ) def _lowerCamelCase ( self ): """simple docstring""" _lowercase : Dict = copy.deepcopy(_UpperCamelCase ) _lowercase : int = self.config_class(**_UpperCamelCase ) _lowercase : Optional[Any] = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.floataa: wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) ) elif getattr(_UpperCamelCase , _UpperCamelCase ) != value: wrong_values.append((key, getattr(_UpperCamelCase , _UpperCamelCase ), value) ) if len(_UpperCamelCase ) > 0: _lowercase : Optional[Any] = "\n".join([f'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] ) raise ValueError(f'''The following keys were not properly set in the config:\n{errors}''' ) def _lowerCamelCase ( self ): """simple docstring""" self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
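# A usage sketch of the tester above (this dump's class `a__` is transformers'
# ConfigTester), wired into a unittest case with BertConfig as a stand-in model
# config; extra kwargs become the inputs_dict the individual checks instantiate
# configs from, mirroring how the model test files in this dump use it.
import unittest
from transformers import BertConfig

class BertConfigTest(unittest.TestCase):
    def setUp(self):
        self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)

    def test_config(self):
        # runs the full battery: common properties, JSON round-trip,
        # save/load round-trip, num_labels handling, argument-init checks
        self.config_tester.run_common_tests()

if __name__ == "__main__":
    unittest.main()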
199
'''simple docstring'''


def nand_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
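# NAND is functionally complete, so the other basic gates fall out of it; a small
# sketch building NOT/AND/OR from the nand_gate above.
def not_gate(a: int) -> int:
    return nand_gate(a, a)

def and_gate(a: int, b: int) -> int:
    return not_gate(nand_gate(a, b))

def or_gate(a: int, b: int) -> int:
    return nand_gate(not_gate(a), not_gate(b))

assert [and_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 0, 0, 1]
assert [or_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 1, 1, 1]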
199
1
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class lowerCAmelCase__ ( unittest.TestCase): '''simple docstring''' @slow def _lowerCamelCase ( self) -> Union[str, Any]: _A : List[str] = XLMRobertaModel.from_pretrained("xlm-roberta-base") _A : str = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]]) # The dog is cute and lives in the garden house _A : List[Any] = torch.Size((1, 1_2, 7_6_8)) # batch_size, sequence_length, embedding_vector_dim _A : List[Any] = torch.tensor( [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): _A : List[Any] = model(__lowerCamelCase)["last_hidden_state"].detach() self.assertEqual(output.shape , __lowerCamelCase) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3)) @slow def _lowerCamelCase ( self) -> List[Any]: _A : Dict = XLMRobertaModel.from_pretrained("xlm-roberta-large") _A : int = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]]) # The dog is cute and lives in the garden house _A : List[Any] = torch.Size((1, 1_2, 1_0_2_4)) # batch_size, sequence_length, embedding_vector_dim _A : str = torch.tensor( [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]]) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): _A : int = model(__lowerCamelCase)["last_hidden_state"].detach() self.assertEqual(output.shape , __lowerCamelCase) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , __lowerCamelCase , atol=1e-3))
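# The same base-model check, sketched with a tokenizer instead of hard-coded ids;
# per the comment in the test above, the sentence below maps to exactly that
# 12-token sequence, so the shapes line up.
import torch
from transformers import AutoTokenizer, XLMRobertaModel

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
model = XLMRobertaModel.from_pretrained("xlm-roberta-base")

inputs = tokenizer("The dog is cute and lives in the garden house", return_tensors="pt")
with torch.no_grad():
    last_hidden = model(**inputs).last_hidden_state

print(last_hidden.shape)  # torch.Size([1, 12, 768])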
11
import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __A ( a , a , unittest.TestCase ): """simple docstring""" UpperCamelCase__ : Optional[Any] =StableDiffusionDiffEditPipeline UpperCamelCase__ : str =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""height""", """width""", """image"""} | {"""image_latents"""} UpperCamelCase__ : Optional[Any] =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"""image"""} | {"""image_latents"""} UpperCamelCase__ : Dict =frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess UpperCamelCase__ : Any =frozenset([] ) def __lowercase ( self ): """simple docstring""" torch.manual_seed(0 ) __UpperCamelCase : Dict =UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase__ , ) __UpperCamelCase : List[str] =DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , ) __UpperCamelCase : Union[str, Any] =DDIMInverseScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase__ , set_alpha_to_zero=lowerCamelCase__ , ) torch.manual_seed(0 ) __UpperCamelCase : Optional[int] =AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) __UpperCamelCase : Tuple =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , ) __UpperCamelCase : Any =CLIPTextModel(lowerCamelCase__ ) __UpperCamelCase : int =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) __UpperCamelCase : Union[str, Any] ={ 'unet': unet, 'scheduler': scheduler, 'inverse_scheduler': inverse_scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=0 ): """simple docstring""" __UpperCamelCase : int =floats_tensor((1, 16, 16) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) __UpperCamelCase : Any =floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) if str(lowerCamelCase__ ).startswith('mps' ): __UpperCamelCase : Any =torch.manual_seed(lowerCamelCase__ ) else: __UpperCamelCase : 
Optional[int] =torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) __UpperCamelCase : Dict ={ 'prompt': 'a dog and a newt', 'mask_image': mask, 'image_latents': latents, 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=0 ): """simple docstring""" __UpperCamelCase : Tuple =floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) __UpperCamelCase : int =image.cpu().permute(0 , 2 , 3 , 1 )[0] __UpperCamelCase : Optional[Any] =Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert('RGB' ) if str(lowerCamelCase__ ).startswith('mps' ): __UpperCamelCase : List[Any] =torch.manual_seed(lowerCamelCase__ ) else: __UpperCamelCase : Any =torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) __UpperCamelCase : Optional[int] ={ 'image': image, 'source_prompt': 'a cat and a frog', 'target_prompt': 'a dog and a newt', 'generator': generator, 'num_inference_steps': 2, 'num_maps_per_mask': 2, 'mask_encode_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=0 ): """simple docstring""" __UpperCamelCase : str =floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) __UpperCamelCase : Any =image.cpu().permute(0 , 2 , 3 , 1 )[0] __UpperCamelCase : int =Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert('RGB' ) if str(lowerCamelCase__ ).startswith('mps' ): __UpperCamelCase : Any =torch.manual_seed(lowerCamelCase__ ) else: __UpperCamelCase : int =torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) __UpperCamelCase : Optional[int] ={ 'image': image, 'prompt': 'a cat and a frog', 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'decode_latents': True, 'output_type': 'numpy', } return inputs def __lowercase ( self ): """simple docstring""" if not hasattr(self.pipeline_class , '_optional_components' ): return __UpperCamelCase : Optional[Any] =self.get_dummy_components() __UpperCamelCase : List[str] =self.pipeline_class(**lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) __UpperCamelCase : Union[str, Any] =self.get_dummy_inputs(lowerCamelCase__ ) __UpperCamelCase : List[Any] =pipe(**lowerCamelCase__ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowerCamelCase__ ) __UpperCamelCase : Tuple =self.pipeline_class.from_pretrained(lowerCamelCase__ ) pipe_loaded.to(lowerCamelCase__ ) pipe_loaded.set_progress_bar_config(disable=lowerCamelCase__ ) for optional_component in pipe._optional_components: self.assertTrue( getattr(lowerCamelCase__ , lowerCamelCase__ ) is None , f'`{optional_component}` did not stay set to None after loading.' 
, ) __UpperCamelCase : str =self.get_dummy_inputs(lowerCamelCase__ ) __UpperCamelCase : Union[str, Any] =pipe_loaded(**lowerCamelCase__ )[0] __UpperCamelCase : Tuple =np.abs(output - output_loaded ).max() self.assertLess(lowerCamelCase__ , 1E-4 ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Any ='cpu' __UpperCamelCase : Union[str, Any] =self.get_dummy_components() __UpperCamelCase : Any =self.pipeline_class(**lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __UpperCamelCase : int =self.get_dummy_mask_inputs(lowerCamelCase__ ) __UpperCamelCase : Union[str, Any] =pipe.generate_mask(**lowerCamelCase__ ) __UpperCamelCase : int =mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) __UpperCamelCase : Tuple =np.array([0] * 9 ) __UpperCamelCase : str =np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCamelCase__ , 1E-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : int ='cpu' __UpperCamelCase : Union[str, Any] =self.get_dummy_components() __UpperCamelCase : Optional[Any] =self.pipeline_class(**lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __UpperCamelCase : Dict =self.get_dummy_inversion_inputs(lowerCamelCase__ ) __UpperCamelCase : List[Any] =pipe.invert(**lowerCamelCase__ ).images __UpperCamelCase : Optional[Any] =image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) __UpperCamelCase : List[str] =np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) __UpperCamelCase : int =np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCamelCase__ , 1E-3 ) def __lowercase ( self ): """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=5E-3 ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : List[str] ='cpu' __UpperCamelCase : int =self.get_dummy_components() __UpperCamelCase : str ={'beta_start': 0.00_085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'} __UpperCamelCase : str =DPMSolverMultistepScheduler(**lowerCamelCase__ ) __UpperCamelCase : Dict =DPMSolverMultistepInverseScheduler(**lowerCamelCase__ ) __UpperCamelCase : Any =self.pipeline_class(**lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __UpperCamelCase : Tuple =self.get_dummy_inversion_inputs(lowerCamelCase__ ) __UpperCamelCase : str =pipe.invert(**lowerCamelCase__ ).images __UpperCamelCase : List[Any] =image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) __UpperCamelCase : List[str] =np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) __UpperCamelCase : Optional[Any] =np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCamelCase__ , 1E-3 ) @require_torch_gpu @slow class __A ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def __lowercase ( cls ): """simple docstring""" __UpperCamelCase : Optional[int] =load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' ) __UpperCamelCase : Union[str, Any] =raw_image.convert('RGB' ).resize((768, 768) ) __UpperCamelCase : List[Any] =raw_image def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[int] 
=torch.manual_seed(0 ) __UpperCamelCase : Dict =StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1' , safety_checker=lowerCamelCase__ , torch_dtype=torch.floataa ) __UpperCamelCase : List[str] =DDIMScheduler.from_config(pipe.scheduler.config ) __UpperCamelCase : List[str] =DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __UpperCamelCase : List[str] ='a bowl of fruit' __UpperCamelCase : Dict ='a bowl of pears' __UpperCamelCase : Tuple =pipe.generate_mask( image=self.raw_image , source_prompt=lowerCamelCase__ , target_prompt=lowerCamelCase__ , generator=lowerCamelCase__ , ) __UpperCamelCase : int =pipe.invert( prompt=lowerCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=lowerCamelCase__ ).latents __UpperCamelCase : Dict =pipe( prompt=lowerCamelCase__ , mask_image=lowerCamelCase__ , image_latents=lowerCamelCase__ , generator=lowerCamelCase__ , negative_prompt=lowerCamelCase__ , inpaint_strength=0.7 , output_type='numpy' , ).images[0] __UpperCamelCase : str =( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1 def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Any =torch.manual_seed(0 ) __UpperCamelCase : List[Any] =StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1' , safety_checker=lowerCamelCase__ , torch_dtype=torch.floataa ) __UpperCamelCase : Optional[Any] =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) __UpperCamelCase : Optional[int] =DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __UpperCamelCase : Optional[Any] ='a bowl of fruit' __UpperCamelCase : int ='a bowl of pears' __UpperCamelCase : str =pipe.generate_mask( image=self.raw_image , source_prompt=lowerCamelCase__ , target_prompt=lowerCamelCase__ , generator=lowerCamelCase__ , ) __UpperCamelCase : List[str] =pipe.invert( prompt=lowerCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=lowerCamelCase__ , num_inference_steps=25 , ).latents __UpperCamelCase : List[str] =pipe( prompt=lowerCamelCase__ , mask_image=lowerCamelCase__ , image_latents=lowerCamelCase__ , generator=lowerCamelCase__ , negative_prompt=lowerCamelCase__ , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0] __UpperCamelCase : Tuple =( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1
71
0
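# A minimal sketch of the device-aware seeding pattern used by the dummy-input
# builders in the DiffEdit tests above; the helper name `make_generator` is an
# assumption for illustration, not a name from the source.
import torch


def make_generator(device, seed: int = 0) -> torch.Generator:
    # torch.Generator objects could not be created on the "mps" backend when
    # these tests were written, so the tests seed the global RNG there;
    # everywhere else they build a device-local generator.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)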
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __lowerCAmelCase = logging.get_logger(__name__) class UpperCAmelCase__ ( __snake_case ): """simple docstring""" __UpperCAmelCase : Tuple = ['pixel_values'] def __init__( self : Optional[int] ,_a : List[str] = True ,_a : Any = None ,_a : List[str] = PIL.Image.BICUBIC ,_a : str = True ,_a : Any = None ,_a : Optional[int] = 1 / 255 ,_a : int = True ,_a : List[str] = True ,_a : List[Any] = None ,_a : Any = None ,**_a : Optional[int] ,): '''simple docstring''' super().__init__(**_a ) _a : Dict = size if size is not None else {'height': 256, 'width': 256} _a : List[Any] = get_size_dict(_a ) _a : Tuple = crop_size if crop_size is not None else {'height': 224, 'width': 224} _a : Optional[int] = get_size_dict(_a ,param_name='crop_size' ) _a : List[str] = do_resize _a : Any = size _a : Union[str, Any] = resample _a : Tuple = do_center_crop _a : Dict = crop_size _a : Tuple = do_rescale _a : int = rescale_factor _a : Optional[Any] = do_normalize _a : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _a : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowercase ( self : List[Any] ,_a : str ,_a : Any ,_a : Dict = PIL.Image.BICUBIC ,_a : str = None ,**_a : Dict ,): '''simple docstring''' _a : Dict = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}""" ) return resize( _a ,size=(size['height'], size['width']) ,resample=_a ,data_format=_a ,**_a ) def __lowercase ( self : Tuple ,_a : List[Any] ,_a : Dict ,_a : List[Any] = None ,**_a : List[str] ,): '''simple docstring''' _a : List[Any] = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys \'height\' and \'width\'. 
Got {size.keys()}""" ) return center_crop(_a ,size=(size['height'], size['width']) ,data_format=_a ,**_a ) def __lowercase ( self : Union[str, Any] ,_a : List[Any] ,_a : Any ,_a : List[Any] = None ,**_a : List[str] ,): '''simple docstring''' return rescale(_a ,scale=_a ,data_format=_a ,**_a ) def __lowercase ( self : Any ,_a : Optional[Any] ,_a : Union[str, Any] ,_a : Any ,_a : Optional[int] = None ,**_a : Optional[Any] ,): '''simple docstring''' return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a ) def __lowercase ( self : str ,_a : Optional[Any] ,_a : str = None ,_a : Union[str, Any] = None ,_a : Tuple=None ,_a : Optional[Any] = None ,_a : List[Any] = None ,_a : str = None ,_a : List[Any] = None ,_a : Dict = None ,_a : Tuple = None ,_a : Tuple = None ,_a : List[Any] = None ,_a : Optional[int] = ChannelDimension.FIRST ,**_a : Tuple ,): '''simple docstring''' _a : Any = do_resize if do_resize is not None else self.do_resize _a : Optional[int] = resample if resample is not None else self.resample _a : Any = do_center_crop if do_center_crop is not None else self.do_center_crop _a : str = do_rescale if do_rescale is not None else self.do_rescale _a : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor _a : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize _a : int = image_mean if image_mean is not None else self.image_mean _a : Tuple = image_std if image_std is not None else self.image_std _a : int = size if size is not None else self.size _a : Optional[Any] = get_size_dict(_a ) _a : Tuple = crop_size if crop_size is not None else self.crop_size _a : Optional[Any] = get_size_dict(_a ,param_name='crop_size' ) _a : List[Any] = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. _a : Optional[int] = [to_numpy_array(_a ) for image in images] if do_resize: _a : Optional[Any] = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images] if do_center_crop: _a : Optional[Any] = [self.center_crop(image=_a ,size=_a ) for image in images] if do_rescale: _a : Dict = [self.rescale(image=_a ,scale=_a ) for image in images] if do_normalize: _a : str = [self.normalize(image=_a ,mean=_a ,std=_a ) for image in images] _a : List[str] = [to_channel_dimension_format(_a ,_a ) for image in images] _a : Optional[Any] = {'pixel_values': images} return BatchFeature(data=_a ,tensor_type=_a )
361
'''simple docstring''' import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCAmelCase__ : """simple docstring""" def __init__( self : int ,_a : List[str] ,_a : Optional[Any]=13 ,_a : str=30 ,_a : str=2 ,_a : Union[str, Any]=3 ,_a : Optional[Any]=True ,_a : int=True ,_a : Union[str, Any]=32 ,_a : List[Any]=5 ,_a : Union[str, Any]=4 ,_a : int=37 ,_a : Any="gelu" ,_a : Union[str, Any]=0.1 ,_a : str=0.1 ,_a : List[str]=10 ,_a : Dict=0.02 ,_a : Tuple=None ,): '''simple docstring''' _a : Any = parent _a : int = batch_size _a : List[Any] = image_size _a : Optional[int] = patch_size _a : List[str] = num_channels _a : Dict = is_training _a : Dict = use_labels _a : Optional[Any] = hidden_size _a : str = num_hidden_layers _a : Optional[int] = num_attention_heads _a : Dict = intermediate_size _a : Union[str, Any] = hidden_act _a : List[str] = hidden_dropout_prob _a : Any = attention_probs_dropout_prob _a : List[str] = type_sequence_label_size _a : int = initializer_range _a : List[Any] = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _a : Union[str, Any] = (image_size // patch_size) ** 2 _a : Tuple = num_patches + 1 def __lowercase ( self : Any ): '''simple docstring''' _a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _a : str = None if self.use_labels: _a : Tuple = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) _a : List[str] = self.get_config() return config, pixel_values, labels def __lowercase ( self : Optional[int] ): '''simple docstring''' return ViTMSNConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,) def __lowercase ( self : Tuple ,_a : Any ,_a : List[Any] ,_a : int ): '''simple docstring''' _a : str = ViTMSNModel(config=_a ) model.to(_a ) model.eval() _a : int = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def __lowercase ( self : List[Any] ,_a : str ,_a : Tuple ,_a : Dict ): '''simple docstring''' _a : Tuple = self.type_sequence_label_size _a : int = ViTMSNForImageClassification(_a ) model.to(_a ) model.eval() _a : Dict = model(_a ,labels=_a ) print('Pixel and labels shape: {pixel_values.shape}, {labels.shape}' ) print('Labels: {labels}' ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images _a : int = 1 _a : Optional[Any] = ViTMSNForImageClassification(_a ) 
model.to(_a ) model.eval() _a : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _a : Optional[int] = model(_a ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def __lowercase ( self : Any ): '''simple docstring''' _a : Optional[int] = self.prepare_config_and_inputs() _a, _a, _a : int = config_and_inputs _a : List[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () __UpperCAmelCase : List[Any] = ( {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification} if is_torch_available() else {} ) __UpperCAmelCase : str = False __UpperCAmelCase : Optional[Any] = False __UpperCAmelCase : List[str] = False __UpperCAmelCase : int = False def __lowercase ( self : Optional[int] ): '''simple docstring''' _a : List[str] = ViTMSNModelTester(self ) _a : Optional[int] = ConfigTester(self ,config_class=_a ,has_text_modality=_a ,hidden_size=37 ) def __lowercase ( self : str ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='ViTMSN does not use inputs_embeds' ) def __lowercase ( self : List[str] ): '''simple docstring''' pass def __lowercase ( self : Union[str, Any] ): '''simple docstring''' _a, _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : List[Any] = model_class(_a ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) _a : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_a ,nn.Linear ) ) def __lowercase ( self : Any ): '''simple docstring''' _a, _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : List[str] = model_class(_a ) _a : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a : List[Any] = [*signature.parameters.keys()] _a : int = ['pixel_values'] self.assertListEqual(arg_names[:1] ,_a ) def __lowercase ( self : List[str] ): '''simple docstring''' _a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def __lowercase ( self : Optional[Any] ): '''simple docstring''' _a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_a ) @slow def __lowercase ( self : int ): '''simple docstring''' for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _a : Dict = ViTMSNModel.from_pretrained(_a ) self.assertIsNotNone(_a ) def UpperCAmelCase_ (): """simple docstring""" _a : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class UpperCAmelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None @slow def __lowercase ( self : Union[str, Any] ): '''simple docstring''' torch.manual_seed(2 ) _a : List[str] = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(_a ) _a : List[str] = self.default_image_processor _a : int = prepare_img() _a : Tuple = image_processor(images=_a 
,return_tensors='pt' ).to(_a ) # forward pass with torch.no_grad(): _a : Optional[int] = model(**_a ) # verify the logits _a : Union[str, Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape ,_a ) _a : List[Any] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_a ,atol=1E-4 ) )
5
0
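# Condensed sketch of the last two preprocess steps in the image-processor
# record above (rescale, then normalize); assumes channel-first layout and the
# IMAGENET_STANDARD statistics (mean = std = 0.5 per channel) that the config
# defaults to. The helper name is illustrative.
import numpy as np


def rescale_and_normalize(image: np.ndarray) -> np.ndarray:
    image = image.astype(np.float32) / 255.0  # uint8 pixels -> [0, 1]
    mean = np.array([0.5, 0.5, 0.5]).reshape(-1, 1, 1)
    std = np.array([0.5, 0.5, 0.5]).reshape(-1, 1, 1)
    return (image - mean) / std  # per-channel shift/scale -> [-1, 1]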
def remove_duplicates(key: str) -> str:
    """Remove duplicate alphabetic characters from the keyword, keeping order."""
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Build a substitution map from the plaintext alphabet to the keyed alphabet."""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encipher a message with the given cipher map; unmapped characters pass through."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Decipher a message by inverting the cipher map."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """Handle user input and run the chosen cipher direction."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
274
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
274
1
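# Usage sketch for the keyword-cipher functions above. The round trip holds
# because the keyed alphabet is a bijection on A-Z and non-letters (spaces
# here) pass through unchanged; the keyword "HARSH" is arbitrary.
cipher_map = create_cipher_map("HARSH")
secret = encipher("THE QUICK BROWN FOX", cipher_map)
assert decipher(secret, cipher_map) == "THE QUICK BROWN FOX"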
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def _snake_case ( _snake_case : Dict ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4e00 and cp <= 0X9fff) or (cp >= 0X3400 and cp <= 0X4dbf) # or (cp >= 0X2_0000 and cp <= 0X2_a6df) # or (cp >= 0X2_a700 and cp <= 0X2_b73f) # or (cp >= 0X2_b740 and cp <= 0X2_b81f) # or (cp >= 0X2_b820 and cp <= 0X2_ceaf) # or (cp >= 0Xf900 and cp <= 0Xfaff) or (cp >= 0X2_f800 and cp <= 0X2_fa1f) # ): # return True return False def _snake_case ( _snake_case : str ): # word like '180' or '身高' or '神' for char in word: lowerCAmelCase : str = ord(_snake_case ) if not _is_chinese_char(_snake_case ): return 0 return 1 def _snake_case ( _snake_case : List[str] ): lowerCAmelCase : List[Any] = set() for token in tokens: lowerCAmelCase : Union[str, Any] = len(_snake_case ) > 1 and is_chinese(_snake_case ) if chinese_word: word_set.add(_snake_case ) lowerCAmelCase : List[str] = list(_snake_case ) return word_list def _snake_case ( _snake_case : List[str] , _snake_case : set() ): if not chinese_word_set: return bert_tokens lowerCAmelCase : List[Any] = max([len(_snake_case ) for w in chinese_word_set] ) lowerCAmelCase : Optional[Any] = bert_tokens lowerCAmelCase, lowerCAmelCase : Any = 0, len(_snake_case ) while start < end: lowerCAmelCase : str = True if is_chinese(bert_word[start] ): lowerCAmelCase : List[Any] = min(end - start , _snake_case ) for i in range(_snake_case , 1 , -1 ): lowerCAmelCase : str = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCAmelCase : Optional[Any] = '''##''' + bert_word[j] lowerCAmelCase : Union[str, Any] = start + i lowerCAmelCase : Optional[Any] = False break if single_word: start += 1 return bert_word def _snake_case ( _snake_case : List[str] , _snake_case : LTP , _snake_case : BertTokenizer ): lowerCAmelCase : Optional[int] = [] for i in range(0 , len(_snake_case ) , 100 ): lowerCAmelCase : Optional[int] = ltp_tokenizer.seg(lines[i : i + 100] )[0] lowerCAmelCase : Union[str, Any] = [get_chinese_word(_snake_case ) for r in res] ltp_res.extend(_snake_case ) assert len(_snake_case ) == len(_snake_case ) lowerCAmelCase : int = [] for i in range(0 , len(_snake_case ) , 100 ): lowerCAmelCase : Optional[Any] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_snake_case , truncation=_snake_case , max_length=512 ) bert_res.extend(res['''input_ids'''] ) assert len(_snake_case ) == len(_snake_case ) lowerCAmelCase : Union[str, Any] = [] for input_ids, chinese_word in zip(_snake_case , _snake_case ): lowerCAmelCase : Optional[int] = [] for id in input_ids: lowerCAmelCase : Union[str, Any] = bert_tokenizer._convert_id_to_token(_snake_case ) input_tokens.append(_snake_case ) lowerCAmelCase : Any = add_sub_symbol(_snake_case , _snake_case ) lowerCAmelCase : Union[str, Any] = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_snake_case ): if token[:2] == "##": lowerCAmelCase : Any = token[2:] # save chinese tokens' pos if len(_snake_case ) == 1 and _is_chinese_char(ord(_snake_case ) ): ref_id.append(_snake_case ) ref_ids.append(_snake_case ) assert len(_snake_case ) == len(_snake_case ) return ref_ids def _snake_case ( _snake_case : Dict ): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f: lowerCAmelCase : List[str] = f.readlines() lowerCAmelCase : Union[str, Any] = [line.strip() for line in data if len(_snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCAmelCase : List[str] = LTP(args.ltp ) # faster in GPU device lowerCAmelCase : Any = BertTokenizer.from_pretrained(args.bert ) lowerCAmelCase : int = prepare_ref(_snake_case , _snake_case , _snake_case ) with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f: lowerCAmelCase : List[Any] = [json.dumps(_snake_case ) + '''\n''' for ref in ref_ids] f.writelines(_snake_case ) if __name__ == "__main__": snake_case__ : Optional[int] = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''' ) parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''') parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''') snake_case__ : int = parser.parse_args() main(args)
314
"""simple docstring""" import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class snake_case_( a__ ): __UpperCamelCase = (DDPMScheduler,) def lowerCamelCase__ ( self : List[Any] , **UpperCamelCase_ : Union[str, Any] ): lowerCAmelCase : Optional[Any] = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**UpperCamelCase_ ) return config def lowerCamelCase__ ( self : Optional[int] ): for timesteps in [1, 5, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCamelCase_ ) def lowerCamelCase__ ( self : Any ): self.check_over_configs(thresholding=UpperCamelCase_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , ) def lowerCamelCase__ ( self : Tuple ): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): for t in [0, 5_0_0, 9_9_9]: self.check_over_forward(time_step=UpperCamelCase_ ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : str = self.scheduler_classes[0] lowerCAmelCase : Dict = self.get_scheduler_config() lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5 def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : List[Any] = self.scheduler_classes[0] lowerCAmelCase : List[Any] = self.get_scheduler_config() lowerCAmelCase : List[str] = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ ) lowerCAmelCase : List[str] = self.dummy_model() lowerCAmelCase : Union[str, Any] = self.dummy_sample_deter lowerCAmelCase : List[Any] = torch.manual_seed(0 ) for t in reversed(range(UpperCamelCase_ ) ): # 1. predict noise residual lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , UpperCamelCase_ ) # 2. 
predict previous mean of sample x_t-1 lowerCAmelCase : Optional[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCAmelCase : Union[str, Any] = pred_prev_sample lowerCAmelCase : str = torch.sum(torch.abs(UpperCamelCase_ ) ) lowerCAmelCase : int = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 258.9_606 ) < 1E-2 assert abs(result_mean.item() - 0.3_372 ) < 1E-3 def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Optional[int] = self.scheduler_classes[0] lowerCAmelCase : Any = self.get_scheduler_config(prediction_type='''v_prediction''' ) lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : Dict = len(UpperCamelCase_ ) lowerCAmelCase : Any = self.dummy_model() lowerCAmelCase : Any = self.dummy_sample_deter lowerCAmelCase : List[Any] = torch.manual_seed(0 ) for t in reversed(range(UpperCamelCase_ ) ): # 1. predict noise residual lowerCAmelCase : str = model(UpperCamelCase_ , UpperCamelCase_ ) # 2. predict previous mean of sample x_t-1 lowerCAmelCase : List[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCAmelCase : List[Any] = pred_prev_sample lowerCAmelCase : List[str] = torch.sum(torch.abs(UpperCamelCase_ ) ) lowerCAmelCase : Dict = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 202.0_296 ) < 1E-2 assert abs(result_mean.item() - 0.2_631 ) < 1E-3 def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Dict = self.scheduler_classes[0] lowerCAmelCase : Tuple = self.get_scheduler_config() lowerCAmelCase : int = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : List[Any] = [1_0_0, 8_7, 5_0, 1, 0] scheduler.set_timesteps(timesteps=UpperCamelCase_ ) lowerCAmelCase : Dict = scheduler.timesteps for i, timestep in enumerate(UpperCamelCase_ ): if i == len(UpperCamelCase_ ) - 1: lowerCAmelCase : List[Any] = -1 else: lowerCAmelCase : Union[str, Any] = timesteps[i + 1] lowerCAmelCase : Any = scheduler.previous_timestep(UpperCamelCase_ ) lowerCAmelCase : Dict = prev_t.item() self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0] lowerCAmelCase : List[Any] = self.get_scheduler_config() lowerCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : int = [1_0_0, 8_7, 5_0, 5_1, 0] with self.assertRaises(UpperCamelCase_ , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Any = self.scheduler_classes[0] lowerCAmelCase : Optional[int] = self.get_scheduler_config() lowerCAmelCase : str = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : List[str] = [1_0_0, 8_7, 5_0, 1, 0] lowerCAmelCase : int = len(UpperCamelCase_ ) with self.assertRaises(UpperCamelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=UpperCamelCase_ , timesteps=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : List[Any] = self.scheduler_classes[0] lowerCAmelCase : Tuple = self.get_scheduler_config() 
lowerCAmelCase : Dict = scheduler_class(**UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCamelCase_ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=UpperCamelCase_ )
314
1
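# The DDPM scheduler test in the context above hard-codes variances for
# t = 0, 487 and 999. A sketch of the "fixed_small" posterior-variance formula
# those references come from, beta_t * (1 - abar_{t-1}) / (1 - abar_t), under
# the same linear beta schedule; exact agreement with the library's internal
# clamping is assumed, not verified here.
import torch

betas = torch.linspace(0.0001, 0.02, 1000)  # "linear" schedule, 1000 steps
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)


def posterior_variance(t: int) -> torch.Tensor:
    # At t == 0 the previous cumulative product is 1, so the variance is 0.
    alpha_bar_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return betas[t] * (1.0 - alpha_bar_prev) / (1.0 - alphas_cumprod[t])


# posterior_variance(0) -> 0.0, posterior_variance(487) -> ~0.00979,
# posterior_variance(999) -> ~0.02, matching the asserts in the test above.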
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty summary list."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and\n"
            " seventy-five.\n\nSpiritual revelations were conceded to England at that\n"
            " favoured period, as at this."
        )
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns empty story and summary lists."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
216
'''simple docstring''' import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger A__ : List[str] = get_logger(__name__) A__ : str = R""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs (`Dict[str, Any]`, *optional*): Additional logits processor specific kwargs. Return: `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. """ class UpperCAmelCase_ : """simple docstring""" @add_start_docstrings(SCREAMING_SNAKE_CASE_ ) def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> jnp.ndarray: raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) class UpperCAmelCase_ : """simple docstring""" @add_start_docstrings(SCREAMING_SNAKE_CASE_ ) def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> jnp.ndarray: raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" @add_start_docstrings(SCREAMING_SNAKE_CASE_ ) def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> jnp.ndarray: for processor in self: __lowerCamelCase : str = inspect.signature(processor.__call__ ).parameters if len(SCREAMING_SNAKE_CASE_ ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( f'Make sure that all the required parameters: {list(function_args.keys() )} for ' f'{processor.__class__} are passed to the logits processor.' 
) __lowerCamelCase : Tuple = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) else: __lowerCamelCase : int = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return scores class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]: if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not (temperature > 0): raise ValueError(f'`temperature` has to be a strictly positive float, but is {temperature}' ) __lowerCamelCase : Optional[int] = temperature def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> jnp.ndarray: __lowerCamelCase : Dict = scores / self.temperature return scores class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -float('Inf' ) , SCREAMING_SNAKE_CASE_ = 1 ) -> Union[str, Any]: if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or (top_p < 0 or top_p > 1.0): raise ValueError(f'`top_p` has to be a float > 0 and < 1, but is {top_p}' ) if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or (min_tokens_to_keep < 1): raise ValueError(f'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}' ) __lowerCamelCase : str = top_p __lowerCamelCase : Tuple = filter_value __lowerCamelCase : Tuple = min_tokens_to_keep def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> jnp.ndarray: __lowerCamelCase , __lowerCamelCase : Any = lax.top_k(SCREAMING_SNAKE_CASE_ , scores.shape[-1] ) __lowerCamelCase : int = jnp.full_like(SCREAMING_SNAKE_CASE_ , self.filter_value ) __lowerCamelCase : Tuple = jax.nn.softmax(SCREAMING_SNAKE_CASE_ , axis=-1 ).cumsum(axis=-1 ) __lowerCamelCase : List[str] = cumulative_probs < self.top_p # include the token that is higher than top_p as well __lowerCamelCase : Tuple = jnp.roll(SCREAMING_SNAKE_CASE_ , 1 ) score_mask |= score_mask.at[:, 0].set(SCREAMING_SNAKE_CASE_ ) # min tokens to keep __lowerCamelCase : Any = score_mask.at[:, : self.min_tokens_to_keep].set(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = jnp.where(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = jax.lax.sort_key_val(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )[-1] return next_scores class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -float('Inf' ) , SCREAMING_SNAKE_CASE_ = 1 ) -> str: if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or top_k <= 0: raise ValueError(f'`top_k` has to be a strictly positive integer, but is {top_k}' ) __lowerCamelCase : List[str] = max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = filter_value def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> jnp.ndarray: __lowerCamelCase , __lowerCamelCase : List[Any] = scores.shape __lowerCamelCase : Tuple = jnp.full(batch_size * vocab_size , self.filter_value ) __lowerCamelCase : int = min(self.top_k , scores.shape[-1] ) # Safety check __lowerCamelCase , __lowerCamelCase : Tuple = lax.top_k(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Union[str, Any] = jnp.broadcast_to((jnp.arange(SCREAMING_SNAKE_CASE_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() __lowerCamelCase : List[Any] = topk_scores.flatten() 
__lowerCamelCase : Union[str, Any] = topk_indices.flatten() + shift __lowerCamelCase : Tuple = next_scores_flat.at[topk_indices_flat].set(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = next_scores_flat.reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return next_scores class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: __lowerCamelCase : Any = bos_token_id def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> jnp.ndarray: __lowerCamelCase : Optional[Any] = jnp.full(scores.shape , -float('inf' ) ) __lowerCamelCase : Optional[Any] = 1 - jnp.bool_(cur_len - 1 ) __lowerCamelCase : List[Any] = jnp.where(SCREAMING_SNAKE_CASE_ , new_scores.at[:, self.bos_token_id].set(0 ) , SCREAMING_SNAKE_CASE_ ) return scores class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict: __lowerCamelCase : Tuple = max_length __lowerCamelCase : Any = eos_token_id def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> jnp.ndarray: __lowerCamelCase : List[str] = jnp.full(scores.shape , -float('inf' ) ) __lowerCamelCase : Any = 1 - jnp.bool_(cur_len - self.max_length + 1 ) __lowerCamelCase : List[str] = jnp.where(SCREAMING_SNAKE_CASE_ , new_scores.at[:, self.eos_token_id].set(0 ) , SCREAMING_SNAKE_CASE_ ) return scores class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]: if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or min_length < 0: raise ValueError(f'`min_length` has to be a positive integer, but is {min_length}' ) if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or eos_token_id < 0: raise ValueError(f'`eos_token_id` has to be a positive integer, but is {eos_token_id}' ) __lowerCamelCase : str = min_length __lowerCamelCase : Optional[int] = eos_token_id def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> jnp.ndarray: # create boolean flag to decide if min length penalty should be applied __lowerCamelCase : Optional[Any] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) __lowerCamelCase : str = jnp.where(SCREAMING_SNAKE_CASE_ , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , SCREAMING_SNAKE_CASE_ ) return scores class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict: __lowerCamelCase : Union[str, Any] = list(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = begin_index def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any: __lowerCamelCase : List[Any] = 1 - jnp.bool_(cur_len - self.begin_index ) __lowerCamelCase : str = jnp.where(SCREAMING_SNAKE_CASE_ , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , SCREAMING_SNAKE_CASE_ ) return scores class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]: __lowerCamelCase : Tuple = list(SCREAMING_SNAKE_CASE_ ) def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> jnp.ndarray: __lowerCamelCase : int = scores.at[..., self.suppress_tokens].set(-float('inf' ) ) return scores class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, 
Any]: __lowerCamelCase : Optional[int] = dict(SCREAMING_SNAKE_CASE_ ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. __lowerCamelCase : Dict = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: __lowerCamelCase : str = force_token_array.at[index].set(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = jnp.intaa(SCREAMING_SNAKE_CASE_ ) def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> jnp.ndarray: def _force_token(SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : List[str] = scores.shape[0] __lowerCamelCase : Tuple = self.force_token_array[generation_idx] __lowerCamelCase : List[Any] = jnp.ones_like(SCREAMING_SNAKE_CASE_ , dtype=scores.dtype ) * -float('inf' ) __lowerCamelCase : Any = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) __lowerCamelCase : str = lax.dynamic_update_slice(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (0, current_token) ) return new_scores __lowerCamelCase : int = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(SCREAMING_SNAKE_CASE_ ) , lambda: scores , ) , ) return scores class UpperCAmelCase_ (_UpperCAmelCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any: __lowerCamelCase : Any = generate_config.eos_token_id __lowerCamelCase : Dict = generate_config.no_timestamps_token_id __lowerCamelCase : Tuple = generate_config.no_timestamps_token_id + 1 __lowerCamelCase : List[str] = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(SCREAMING_SNAKE_CASE_ , 'max_initial_timestamp_index' ): __lowerCamelCase : str = generate_config.max_initial_timestamp_index else: __lowerCamelCase : Optional[int] = model_config.vocab_size if self.max_initial_timestamp_index is None: __lowerCamelCase : Tuple = model_config.vocab_size def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any: # suppress <|notimestamps|> which is handled by without_timestamps __lowerCamelCase : Union[str, Any] = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) ) def handle_pairs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : str = jnp.where((cur_len - self.begin_index) >= 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[Any] = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , SCREAMING_SNAKE_CASE_ , ) __lowerCamelCase : Any = jnp.where((cur_len - self.begin_index) < 2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) return jnp.where( SCREAMING_SNAKE_CASE_ , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , SCREAMING_SNAKE_CASE_ , ) __lowerCamelCase : List[Any] = jax.vmap(SCREAMING_SNAKE_CASE_ )(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = jnp.where(cur_len == self.begin_index , 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , SCREAMING_SNAKE_CASE_ , ) __lowerCamelCase : Any = self.timestamp_begin + self.max_initial_timestamp_index __lowerCamelCase : str = jnp.where( SCREAMING_SNAKE_CASE_ , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , SCREAMING_SNAKE_CASE_ , ) # if sum of probability over timestamps is above any other token, sample timestamp __lowerCamelCase : Any = jax.nn.log_softmax(SCREAMING_SNAKE_CASE_ , axis=-1 ) def handle_cumulative_probs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __lowerCamelCase : Union[str, Any] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) __lowerCamelCase : List[str] = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , SCREAMING_SNAKE_CASE_ , ) __lowerCamelCase : Dict = jax.vmap(SCREAMING_SNAKE_CASE_ )(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return scores
185
0
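# Plain-PyTorch restatement of the Flax top-p (nucleus) warper in the context
# above: sort, take the cumulative softmax, roll the keep-mask by one position
# so the boundary token survives, then scatter back to the original token
# order. The function name `top_p_filter` is illustrative, not from the source.
import torch


def top_p_filter(scores: torch.Tensor, top_p: float, filter_value: float = float("-inf")) -> torch.Tensor:
    sorted_scores, sorted_idx = torch.sort(scores, dim=-1, descending=True)
    cumulative = sorted_scores.softmax(dim=-1).cumsum(dim=-1)
    keep = torch.roll(cumulative < top_p, 1, dims=-1)
    keep[..., 0] = True  # always keep the highest-probability token
    masked = torch.where(keep, sorted_scores, torch.full_like(sorted_scores, filter_value))
    # Undo the sort: write each masked score back to its original position.
    return torch.full_like(scores, filter_value).scatter(-1, sorted_idx, masked)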
'''simple docstring''' import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: _UpperCAmelCase = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class lowercase ( unittest.TestCase ): """simple docstring""" def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=18 , UpperCamelCase_=30 , UpperCamelCase_=400 , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=None , ): '''simple docstring''' UpperCamelCase__ :List[Any] = size if size is not None else {'''height''': 20, '''width''': 20} UpperCamelCase__ :Tuple = parent UpperCamelCase__ :Dict = batch_size UpperCamelCase__ :Optional[Any] = num_channels UpperCamelCase__ :Any = image_size UpperCamelCase__ :str = min_resolution UpperCamelCase__ :int = max_resolution UpperCamelCase__ :Dict = size UpperCamelCase__ :Optional[Any] = do_normalize UpperCamelCase__ :Tuple = do_convert_rgb UpperCamelCase__ :str = [512, 1024, 2048, 4096] UpperCamelCase__ :Any = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16} def lowerCAmelCase__ ( self ): '''simple docstring''' return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Tuple = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg''' UpperCamelCase__ :Optional[int] = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ).convert('''RGB''' ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' 
    ,
)
@require_torch
@require_vision
class lowercase ( A__ , unittest.TestCase ):
    """simple docstring"""

    _a = PixaStructImageProcessor if is_vision_available() else None

    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        UpperCamelCase__ :str = PixaStructImageProcessingTester(self )

    @property
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        UpperCamelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
        self.assertTrue(hasattr(UpperCamelCase_ , '''do_convert_rgb''' ) )

    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        UpperCamelCase__ :List[str] = self.image_processor_tester.prepare_dummy_image()
        UpperCamelCase__ :Any = self.image_processing_class(**self.image_processor_dict )
        UpperCamelCase__ :Union[str, Any] = 2048
        UpperCamelCase__ :Any = image_processor(UpperCamelCase_ , return_tensors='''pt''' , max_patches=UpperCamelCase_ )
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )

    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        UpperCamelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCamelCase__ :Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase_ , Image.Image )

        # Test not batched input
        UpperCamelCase__ :Optional[int] = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            UpperCamelCase__ :str = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
            self.assertEqual(
                encoded_images.shape ,
                (1, max_patch, expected_hidden_dim) ,
            )

            # Test batched
            UpperCamelCase__ :str = image_processor(
                UpperCamelCase_ , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
            self.assertEqual(
                encoded_images.shape ,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,
            )

    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        UpperCamelCase__ :str = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCamelCase__ :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase_ , Image.Image )

        # Test not batched input
        UpperCamelCase__ :Dict = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2

        UpperCamelCase__ :str = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(UpperCamelCase_ ):
                UpperCamelCase__ :Optional[int] = image_processor(
                    image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches

            UpperCamelCase__ :List[str] = '''Hello'''
            UpperCamelCase__ :str = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCamelCase_ , header_text=UpperCamelCase_ ).flattened_patches
            self.assertEqual(
                encoded_images.shape ,
                (1, max_patch, expected_hidden_dim) ,
            )

            # Test batched
            UpperCamelCase__ :List[Any] = image_processor(
                UpperCamelCase_ , return_tensors='''pt''' , max_patches=UpperCamelCase_ , header_text=UpperCamelCase_ ).flattened_patches
            self.assertEqual(
                encoded_images.shape ,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,
            )

    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCamelCase__ :Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase_ , np.ndarray )

        UpperCamelCase__ :Dict = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            UpperCamelCase__ :int = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
            self.assertEqual(
                encoded_images.shape ,
                (1, max_patch, expected_hidden_dim) ,
            )

            # Test batched
            UpperCamelCase__ :str = image_processor(
                UpperCamelCase_ , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
            self.assertEqual(
                encoded_images.shape ,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,
            )

    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        UpperCamelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCamelCase__ :Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase_ , torch.Tensor )

        # Test not batched input
        UpperCamelCase__ :Union[str, Any] = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            UpperCamelCase__ :Any = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
            self.assertEqual(
                encoded_images.shape ,
                (1, max_patch, expected_hidden_dim) ,
            )

            # Test batched
            UpperCamelCase__ :List[Any] = image_processor(
                UpperCamelCase_ , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
            self.assertEqual(
                encoded_images.shape ,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 ,
    reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' ,
)
@require_torch
@require_vision
class lowercase ( A__ , unittest.TestCase ):
    """simple docstring"""

    _a = PixaStructImageProcessor if is_vision_available() else None

    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        UpperCamelCase__ :Union[str, Any] = PixaStructImageProcessingTester(self , num_channels=4 )
        UpperCamelCase__ :Any = 3

    @property
    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        UpperCamelCase__ :Any = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
        self.assertTrue(hasattr(UpperCamelCase_ , '''do_convert_rgb''' ) )

    def lowerCAmelCase__ ( self ):
        '''simple docstring'''
        UpperCamelCase__ :Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCamelCase__ :Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase_ , Image.Image )

        # Test not batched input
        UpperCamelCase__ :Any = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            UpperCamelCase__ :List[Any] = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
            self.assertEqual(
                encoded_images.shape ,
                (1, max_patch, expected_hidden_dim) ,
            )

            # Test batched
            UpperCamelCase__ :str = image_processor(
                UpperCamelCase_ , return_tensors='''pt''' , max_patches=UpperCamelCase_ ).flattened_patches
            self.assertEqual(
                encoded_images.shape ,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,
            )
363
'''simple docstring'''
from argparse import ArgumentParser

from .env import EnvironmentCommand


def a ( ) -> Union[str, Any]:
    '''simple docstring'''
    UpperCamelCase__ :Union[str, Any] = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
    UpperCamelCase__ :Union[str, Any] = parser.add_subparsers(help='''diffusers-cli command helpers''' )

    # Register commands
    EnvironmentCommand.register_subcommand(__a )

    # Let's go
    UpperCamelCase__ :Optional[int] = parser.parse_args()

    if not hasattr(__a , '''func''' ):
        parser.print_help()
        exit(1 )

    # Run
    UpperCamelCase__ :Optional[int] = args.func(__a )
    service.run()


if __name__ == "__main__":
    main()
219
0
'''simple docstring'''
from __future__ import annotations

from collections import deque


class UpperCAmelCase_ :
    """simple docstring"""

    def __init__( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
        __lowerCamelCase : list[dict] = []
        self.adlist.append(
            {'value': '', 'next_states': [], 'fail_state': 0, 'output': []} )

        for keyword in keywords:
            self.add_keyword(SCREAMING_SNAKE_CASE_ )
        self.set_fail_transitions()

    def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> None:
        __lowerCamelCase : Dict = 0
        for character in keyword:
            __lowerCamelCase : List[Any] = self.find_next_state(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            if next_state is None:
                self.adlist.append(
                    {
                        'value': character,
                        'next_states': [],
                        'fail_state': 0,
                        'output': [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
                __lowerCamelCase : Optional[int] = len(self.adlist ) - 1
            else:
                __lowerCamelCase : Union[str, Any] = next_state
        self.adlist[current_state]["output"].append(SCREAMING_SNAKE_CASE_ )

    def lowercase_ ( self ) -> None:
        __lowerCamelCase : deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(SCREAMING_SNAKE_CASE_ )
            __lowerCamelCase : str = 0
        while q:
            __lowerCamelCase : Any = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(SCREAMING_SNAKE_CASE_ )
                __lowerCamelCase : Tuple = self.adlist[r]['fail_state']
                while (
                    self.find_next_state(SCREAMING_SNAKE_CASE_ , self.adlist[child]['value'] ) is None
                    and state != 0
                ):
                    __lowerCamelCase : int = self.adlist[state]['fail_state']
                __lowerCamelCase : int = self.find_next_state(
                    SCREAMING_SNAKE_CASE_ , self.adlist[child]['value'] )
                if self.adlist[child]["fail_state"] is None:
                    __lowerCamelCase : Union[str, Any] = 0
                __lowerCamelCase : Dict = (
                    self.adlist[child]['output']
                    + self.adlist[self.adlist[child]['fail_state']]['output']
                )

    def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> dict[str, list[int]]:
        __lowerCamelCase : dict = {}  # returns a dict with keywords and list of its occurrences
        __lowerCamelCase : Any = 0
        for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
            while (
                self.find_next_state(SCREAMING_SNAKE_CASE_ , string[i] ) is None
                and current_state != 0
            ):
                __lowerCamelCase : Union[str, Any] = self.adlist[current_state]['fail_state']
            __lowerCamelCase : str = self.find_next_state(SCREAMING_SNAKE_CASE_ , string[i] )
            if next_state is None:
                __lowerCamelCase : Union[str, Any] = 0
            else:
                __lowerCamelCase : Any = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        __lowerCamelCase : Dict = []
                    result[key].append(i - len(SCREAMING_SNAKE_CASE_ ) + 1 )
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
185
'''simple docstring'''
from numpy import exp, pi, sqrt


def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : float = 1.0 ) -> int:
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
185
1
'''simple docstring'''
import colorsys

from PIL import Image  # type: ignore


def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
    UpperCAmelCase : Optional[int] = x
    UpperCAmelCase : List[Any] = y
    for step in range(UpperCAmelCase_ ):  # noqa: B007
        UpperCAmelCase : Dict = a * a - b * b + x
        UpperCAmelCase : Optional[Any] = 2 * a * b + y
        UpperCAmelCase : List[Any] = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def UpperCamelCase( UpperCAmelCase_ ):
    if distance == 1:
        return (0, 0, 0)
    else:
        return (2_55, 2_55, 2_55)


def UpperCamelCase( UpperCAmelCase_ ):
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(UpperCAmelCase_ , 1 , 1 ) )


def UpperCamelCase( UpperCAmelCase_ = 8_00 , UpperCAmelCase_ = 6_00 , UpperCAmelCase_ = -0.6 , UpperCAmelCase_ = 0 , UpperCAmelCase_ = 3.2 , UpperCAmelCase_ = 50 , UpperCAmelCase_ = True , ):
    UpperCAmelCase : List[Any] = Image.new('RGB' , (image_width, image_height) )
    UpperCAmelCase : List[Any] = img.load()

    # loop through the image-coordinates
    for image_x in range(UpperCAmelCase_ ):
        for image_y in range(UpperCAmelCase_ ):
            # determine the figure-coordinates based on the image-coordinates
            UpperCAmelCase : List[str] = figure_width / image_width * image_height
            UpperCAmelCase : Dict = figure_center_x + (image_x / image_width - 0.5) * figure_width
            UpperCAmelCase : Any = figure_center_y + (image_y / image_height - 0.5) * figure_height

            UpperCAmelCase : Any = get_distance(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                UpperCAmelCase : Optional[Any] = get_color_coded_rgb(UpperCAmelCase_ )
            else:
                UpperCAmelCase : Optional[Any] = get_black_and_white_rgb(UpperCAmelCase_ )

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    lowercase__ = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
354
'''simple docstring'''
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


lowercase__ = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
280
0
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class UpperCamelCase__ (lowerCAmelCase__ ):
    '''simple docstring'''

    lowerCamelCase_ : Any = ["""image_processor""", """tokenizer"""]
    lowerCamelCase_ : int = """CLIPImageProcessor"""
    lowerCamelCase_ : Any = ("""CLIPTokenizer""", """CLIPTokenizerFast""")

    def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Any:
        lowerCamelCase : Optional[int] = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , UpperCamelCase__ , )
            lowerCamelCase : Optional[Any] = kwargs.pop("feature_extractor" )

        lowerCamelCase : Dict = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )

        super().__init__(UpperCamelCase__ , UpperCamelCase__ )

    def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> int:
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )

        if text is not None:
            lowerCamelCase : Tuple = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )

        if images is not None:
            lowerCamelCase : Any = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )

        if text is not None and images is not None:
            lowerCamelCase : Optional[Any] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )

    def _lowercase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
        return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )

    def _lowercase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
        return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )

    @property
    def _lowercase ( self ) -> Dict:
        lowerCamelCase : Union[str, Any] = self.tokenizer.model_input_names
        lowerCamelCase : Tuple = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def _lowercase ( self ) -> Union[str, Any]:
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCamelCase__ , )
        return self.image_processor_class

    @property
    def _lowercase ( self ) -> Dict:
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCamelCase__ , )
        return self.image_processor
48
import math


def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> float:
    if (
        not isinstance(_SCREAMING_SNAKE_CASE ,(int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1." )
    return apparent_power * power_factor


def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> float:
    if (
        not isinstance(_SCREAMING_SNAKE_CASE ,(int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1." )
    return apparent_power * math.sqrt(1 - power_factor**2 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
48
1
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] , _lowerCamelCase : str) -> list[int]:
    '''simple docstring'''
    __UpperCamelCase : List[str] = int(_lowerCamelCase)

    # Initialize Result
    __UpperCamelCase : Tuple = []

    # Traverse through all denomination
    for denomination in reversed(_lowerCamelCase):
        # Find denominations
        while int(_lowerCamelCase) >= int(_lowerCamelCase):
            total_value -= int(_lowerCamelCase)
            answer.append(_lowerCamelCase)  # Append the "answers" array

    return answer


# Driver Code
if __name__ == "__main__":
    lowercase : Optional[int] = []
    lowercase : int = '0'

    if (
        input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
        == "y"
    ):
        lowercase : List[Any] = int(input('Enter the number of denominations you want to add: ').strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        lowercase : Optional[Any] = input('Enter the change you want to make in Indian Currency: ').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        lowercase : Optional[int] = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        lowercase : Any = input('Enter the change you want to make: ').strip()

    if int(value) == 0 or int(value) < 0:
        print('The total value cannot be zero or negative.')
    else:
        print(f"Following is minimal change for {value}: ")
        lowercase : Any = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=' ')
151
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

lowercase : Any = 16
lowercase : Optional[int] = 32


def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Accelerator , _lowerCamelCase : int = 16) -> int:
    '''simple docstring'''
    __UpperCamelCase : Any = AutoTokenizer.from_pretrained("bert-base-cased")
    __UpperCamelCase : Optional[Any] = load_dataset("glue" , "mrpc")

    def tokenize_function(_lowerCamelCase : Dict):
        # max_length=None => use the model max length (it's actually the default)
        __UpperCamelCase : List[str] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCamelCase , max_length=_lowerCamelCase)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        __UpperCamelCase : Optional[int] = datasets.map(
            _lowerCamelCase , batched=_lowerCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    __UpperCamelCase : List[str] = tokenized_datasets.rename_column("label" , "labels")

    def collate_fn(_lowerCamelCase : Union[str, Any]):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        __UpperCamelCase : str = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            __UpperCamelCase : Optional[Any] = 16
        elif accelerator.mixed_precision != "no":
            __UpperCamelCase : Dict = 8
        else:
            __UpperCamelCase : Optional[Any] = None

        return tokenizer.pad(
            _lowerCamelCase , padding="longest" , max_length=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_tensors="pt" , )

    # Instantiate dataloaders.
    __UpperCamelCase : Optional[Any] = DataLoader(
        tokenized_datasets["train"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase)
    __UpperCamelCase : int = DataLoader(
        tokenized_datasets["validation"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase)

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    lowercase : Union[str, Any] = mocked_dataloaders  # noqa: F811


def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any]) -> str:
    '''simple docstring'''
    if os.environ.get("TESTING_MOCKED_DATALOADERS" , _lowerCamelCase) == "1":
        __UpperCamelCase : List[str] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        __UpperCamelCase : Union[str, Any] = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir)
    else:
        __UpperCamelCase : Optional[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    __UpperCamelCase : List[str] = config["lr"]
    __UpperCamelCase : Optional[Any] = int(config["num_epochs"])
    __UpperCamelCase : List[Any] = int(config["seed"])
    __UpperCamelCase : Any = int(config["batch_size"])
    set_seed(_lowerCamelCase)

    __UpperCamelCase , __UpperCamelCase : List[Any] = get_dataloaders(_lowerCamelCase , _lowerCamelCase)
    __UpperCamelCase : List[str] = evaluate.load("glue" , "mrpc")

    # If the batch size is too big we use gradient accumulation
    __UpperCamelCase : Union[str, Any] = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        __UpperCamelCase : List[Any] = batch_size // MAX_GPU_BATCH_SIZE
        __UpperCamelCase : Union[str, Any] = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    __UpperCamelCase : str = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCamelCase)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    __UpperCamelCase : Optional[int] = model.to(accelerator.device)

    # Instantiate optimizer
    __UpperCamelCase : List[str] = AdamW(params=model.parameters() , lr=_lowerCamelCase)

    # Instantiate scheduler
    __UpperCamelCase : Union[str, Any] = get_linear_schedule_with_warmup(
        optimizer=_lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCamelCase) * num_epochs) // gradient_accumulation_steps , )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Union[str, Any] = accelerator.prepare(
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        __UpperCamelCase : Dict = os.path.split(_lowerCamelCase)[-1].split(".")[0]
        accelerator.init_trackers(_lowerCamelCase , _lowerCamelCase)

    # Now we train the model
    for epoch in range(_lowerCamelCase):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            __UpperCamelCase : Tuple = 0

        for step, batch in enumerate(_lowerCamelCase):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            __UpperCamelCase : Dict = model(**_lowerCamelCase)
            __UpperCamelCase : Any = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            __UpperCamelCase : List[Any] = loss / gradient_accumulation_steps
            accelerator.backward(_lowerCamelCase)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(_lowerCamelCase):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                __UpperCamelCase : Union[str, Any] = model(**_lowerCamelCase)
            __UpperCamelCase : str = outputs.logits.argmax(dim=-1)
            __UpperCamelCase , __UpperCamelCase : Dict = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=_lowerCamelCase , references=_lowerCamelCase , )

        __UpperCamelCase : Optional[Any] = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'epoch {epoch}:' , _lowerCamelCase)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(_lowerCamelCase),
                    "epoch": epoch,
                } ,
                step=_lowerCamelCase , )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()


def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
    '''simple docstring'''
    __UpperCamelCase : str = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision" ,
        type=_lowerCamelCase ,
        default=_lowerCamelCase ,
        choices=["no", "fp16", "bf16", "fp8"] ,
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking" ,
        action="store_true" ,
        help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
    parser.add_argument(
        "--project_dir" ,
        type=_lowerCamelCase ,
        default="logs" ,
        help="Location on where to store experiment tracking logs` and relevent project information" , )
    __UpperCamelCase : Union[str, Any] = parser.parse_args()
    __UpperCamelCase : str = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(_lowerCamelCase , _lowerCamelCase)


if __name__ == "__main__":
    main()
151
1
'''simple docstring'''
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


def lowerCamelCase ( UpperCAmelCase__ : List[str] ) -> List[Any]:
    if is_torch_version("""<""" , """2.0.0""" ) or not hasattr(UpperCAmelCase__ , """_dynamo""" ):
        return False
    return isinstance(UpperCAmelCase__ , torch._dynamo.eval_frame.OptimizedModule )


def lowerCamelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : bool = True ) -> Optional[int]:
    lowercase_ : Optional[int] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    lowercase_ : Optional[int] = is_compiled_module(UpperCAmelCase__ )
    if is_compiled:
        lowercase_ : Union[str, Any] = model
        lowercase_ : Dict = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
        lowercase_ : Dict = model.module

    if not keep_fpaa_wrapper:
        lowercase_ : Any = getattr(UpperCAmelCase__ , """forward""" )
        lowercase_ : List[str] = model.__dict__.pop("""_original_forward""" , UpperCAmelCase__ )
        if original_forward is not None:
            while hasattr(UpperCAmelCase__ , """__wrapped__""" ):
                lowercase_ : Optional[Any] = forward.__wrapped__
                if forward == original_forward:
                    break
            lowercase_ : Any = forward
        if getattr(UpperCAmelCase__ , """_converted_to_transformer_engine""" , UpperCAmelCase__ ):
            convert_model(UpperCAmelCase__ , to_transformer_engine=UpperCAmelCase__ )

    if is_compiled:
        lowercase_ : List[str] = model
        lowercase_ : Dict = compiled_model

    return model


def lowerCamelCase ( ) -> List[Any]:
    PartialState().wait_for_everyone()


def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] ) -> Union[str, Any]:
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(UpperCAmelCase__ , UpperCAmelCase__ )
    elif PartialState().local_process_index == 0:
        torch.save(UpperCAmelCase__ , UpperCAmelCase__ )


@contextmanager
def lowerCamelCase ( **UpperCAmelCase__ : str ) -> Optional[int]:
    for key, value in kwargs.items():
        lowercase_ : List[Any] = str(UpperCAmelCase__ )

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def lowerCamelCase ( UpperCAmelCase__ : Any ) -> int:
    if not hasattr(UpperCAmelCase__ , """__qualname__""" ) and not hasattr(UpperCAmelCase__ , """__name__""" ):
        lowercase_ : List[Any] = getattr(UpperCAmelCase__ , """__class__""" , UpperCAmelCase__ )
    if hasattr(UpperCAmelCase__ , """__qualname__""" ):
        return obj.__qualname__
    if hasattr(UpperCAmelCase__ , """__name__""" ):
        return obj.__name__
    return str(UpperCAmelCase__ )


def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] ) -> Dict:
    for key, value in source.items():
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            lowercase_ : str = destination.setdefault(UpperCAmelCase__ , {} )
            merge_dicts(UpperCAmelCase__ , UpperCAmelCase__ )
        else:
            lowercase_ : Optional[int] = value

    return destination


def lowerCamelCase ( UpperCAmelCase__ : int = None ) -> bool:
    if port is None:
        lowercase_ : Tuple = 29500
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(("""localhost""", port) ) == 0
239
'''simple docstring'''
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> bool:
    # Two integers have different signs exactly when the sign bit of their XOR is set
    return numa ^ numa < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
239
1
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __magic_name__ = logging.get_logger(__name__) __magic_name__ = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __magic_name__ = { "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __magic_name__ = {"facebook/blenderbot-3B": 128} class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" __lowercase : str = VOCAB_FILES_NAMES __lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP __lowercase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : Tuple = ['''input_ids''', '''attention_mask'''] __lowercase : Dict = BlenderbotTokenizer def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="replace" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=False , lowerCAmelCase__=True , **lowerCAmelCase__ , ): super().__init__( _a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , ) __SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get("""add_prefix_space""" , _a) != add_prefix_space: __SCREAMING_SNAKE_CASE = getattr(_a , pre_tok_state.pop("""type""")) __SCREAMING_SNAKE_CASE = add_prefix_space __SCREAMING_SNAKE_CASE = pre_tok_class(**_a) __SCREAMING_SNAKE_CASE = add_prefix_space __SCREAMING_SNAKE_CASE = "post_processor" __SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , _a , _a) if tokenizer_component_instance: __SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: __SCREAMING_SNAKE_CASE = tuple(state["""sep"""]) if "cls" in state: __SCREAMING_SNAKE_CASE = tuple(state["""cls"""]) __SCREAMING_SNAKE_CASE = False if state.get("""add_prefix_space""" , _a) != add_prefix_space: __SCREAMING_SNAKE_CASE = add_prefix_space __SCREAMING_SNAKE_CASE = True if state.get("""trim_offsets""" , _a) != trim_offsets: __SCREAMING_SNAKE_CASE = trim_offsets __SCREAMING_SNAKE_CASE = True if changes_to_apply: __SCREAMING_SNAKE_CASE = getattr(_a , state.pop("""type""")) __SCREAMING_SNAKE_CASE = component_class(**_a) setattr(self.backend_tokenizer , _a , _a) @property # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def snake_case_ ( self): if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""") return None return str(self._mask_token) 
@mask_token.setter def snake_case_ ( self , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = AddedToken(_a , lstrip=_a , rstrip=_a) if isinstance(_a , _a) else value __SCREAMING_SNAKE_CASE = value def snake_case_ ( self , *lowerCAmelCase__ , **lowerCAmelCase__): __SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" , _a) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*_a , **_a) def snake_case_ ( self , *lowerCAmelCase__ , **lowerCAmelCase__): __SCREAMING_SNAKE_CASE = kwargs.get("""is_split_into_words""" , _a) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._encode_plus(*_a , **_a) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None): __SCREAMING_SNAKE_CASE = self._tokenizer.model.save(_a , name=_a) return tuple(_a) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None): __SCREAMING_SNAKE_CASE = [self.sep_token_id] __SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None): return token_ids_a + [self.eos_token_id] def snake_case_ ( self , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(""" """ + text) else: # Generated responses should contain them already. inputs.append(_a) __SCREAMING_SNAKE_CASE = " ".join(_a) __SCREAMING_SNAKE_CASE = self.encode(_a) if len(_a) > self.model_max_length: __SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :] logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.") return input_ids
369
"""simple docstring""" import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow __magic_name__ = False class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def snake_case_ ( self , lowerCAmelCase__=3_2): set_seed(0) __SCREAMING_SNAKE_CASE = UNetaDModel(sample_size=lowerCAmelCase__ , in_channels=3 , out_channels=3) __SCREAMING_SNAKE_CASE = torch.optim.SGD(model.parameters() , lr=0.00_01) return model, optimizer @slow def snake_case_ ( self): __SCREAMING_SNAKE_CASE = """cpu""" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable __SCREAMING_SNAKE_CASE = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_start=0.00_01 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=lowerCAmelCase__ , ) __SCREAMING_SNAKE_CASE = DDIMScheduler( num_train_timesteps=1_0_0_0 , beta_start=0.00_01 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=lowerCAmelCase__ , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0) __SCREAMING_SNAKE_CASE = [torch.randn((4, 3, 3_2, 3_2)).clip(-1 , 1).to(lowerCAmelCase__) for _ in range(4)] __SCREAMING_SNAKE_CASE = [torch.randn((4, 3, 3_2, 3_2)).to(lowerCAmelCase__) for _ in range(4)] __SCREAMING_SNAKE_CASE = [torch.randint(0 , 1_0_0_0 , (4,)).long().to(lowerCAmelCase__) for _ in range(4)] # train with a DDPM scheduler __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.get_model_optimizer(resolution=3_2) model.train().to(lowerCAmelCase__) for i in range(4): optimizer.zero_grad() __SCREAMING_SNAKE_CASE = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i]) __SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , timesteps[i]).sample __SCREAMING_SNAKE_CASE = torch.nn.functional.mse_loss(lowerCAmelCase__ , noise[i]) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.get_model_optimizer(resolution=3_2) model.train().to(lowerCAmelCase__) for i in range(4): optimizer.zero_grad() __SCREAMING_SNAKE_CASE = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i]) __SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , timesteps[i]).sample __SCREAMING_SNAKE_CASE = torch.nn.functional.mse_loss(lowerCAmelCase__ , noise[i]) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5)) self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-5))
255
0
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = DiTPipeline lowercase__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS lowercase__ = PipelineTesterMixin.required_optional_params - { "latents", "num_images_per_prompt", "callback", "callback_steps", } lowercase__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS lowercase__ = False def UpperCamelCase_ ( self: Any ): '''simple docstring''' torch.manual_seed(0 ) _snake_case : Tuple = TransformeraDModel( sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=a_, activation_fn="""gelu-approximate""", num_embeds_ada_norm=1_000, norm_type="""ada_norm_zero""", norm_elementwise_affine=a_, ) _snake_case : List[Any] = AutoencoderKL() _snake_case : Any = DDIMScheduler() _snake_case : Optional[Any] = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler} return components def UpperCamelCase_ ( self: Any, a_: Dict, a_: List[Any]=0 ): '''simple docstring''' if str(a_ ).startswith("""mps""" ): _snake_case : str = torch.manual_seed(a_ ) else: _snake_case : Dict = torch.Generator(device=a_ ).manual_seed(a_ ) _snake_case : Union[str, Any] = { """class_labels""": [1], """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Tuple = """cpu""" _snake_case : str = self.get_dummy_components() _snake_case : Union[str, Any] = self.pipeline_class(**a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) _snake_case : int = self.get_dummy_inputs(a_ ) _snake_case : Any = pipe(**a_ ).images _snake_case : Tuple = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 16, 16, 3) ) _snake_case : List[str] = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] ) _snake_case : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(a_, 1E-3 ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' self._test_inference_batch_single_identical(relax_max_difference=a_, expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available(), reason="""XFormers attention is only available with CUDA and `xformers` installed""", ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : List[Any] = torch.manual_seed(0 ) _snake_case : Dict = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) _snake_case : Optional[Any] = ["""vase""", """umbrella""", """white shark""", 
"""white wolf"""] _snake_case : List[str] = pipe.get_label_ids(a_ ) _snake_case : List[str] = pipe(a_, generator=a_, num_inference_steps=40, output_type="""np""" ).images for word, image in zip(a_, a_ ): _snake_case : Dict = load_numpy( f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" ) assert np.abs((expected_image - image).max() ) < 1E-2 def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Dict = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) _snake_case : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) _snake_case : Dict = ["""vase""", """umbrella"""] _snake_case : List[str] = pipe.get_label_ids(a_ ) _snake_case : Tuple = torch.manual_seed(0 ) _snake_case : Dict = pipe(a_, generator=a_, num_inference_steps=25, output_type="""np""" ).images for word, image in zip(a_, a_ ): _snake_case : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" f"/dit/{word}_512.npy" ) assert np.abs((expected_image - image).max() ) < 1E-1
64
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def UpperCAmelCase ( a_ ) -> List[str]:
    """simple docstring"""
    __A = args.pruning_method
    __A = args.threshold

    __A = args.model_name_or_path.rstrip("/" )
    __A = args.target_model_path

    print(F'''Load fine-pruned model from {model_name_or_path}''' )
    __A = torch.load(os.path.join(a_ , "pytorch_model.bin" ) )
    __A = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            __A = tensor
            print(F'''Copied layer {name}''' )
        elif "classifier" in name or "qa_output" in name:
            __A = tensor
            print(F'''Copied layer {name}''' )
        elif "bias" in name:
            __A = tensor
            print(F'''Copied layer {name}''' )
        else:
            if pruning_method == "magnitude":
                __A = MagnitudeBinarizer.apply(inputs=a_ , threshold=a_ )
                __A = tensor * mask
                print(F'''Pruned layer {name}''' )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                __A = name[:-6]
                __A = model[F'''{prefix_}mask_scores''']
                __A = TopKBinarizer.apply(a_ , a_ )
                __A = tensor * mask
                print(F'''Pruned layer {name}''' )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                __A = name[:-6]
                __A = model[F'''{prefix_}mask_scores''']
                __A = ThresholdBinarizer.apply(a_ , a_ , a_ )
                __A = tensor * mask
                print(F'''Pruned layer {name}''' )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                __A = name[:-6]
                __A = model[F'''{prefix_}mask_scores''']
                __A , __A = -0.1, 1.1
                __A = torch.sigmoid(a_ )
                __A = s * (r - l) + l
                __A = s_bar.clamp(min=0.0 , max=1.0 )
                __A = tensor * mask
                print(F'''Pruned layer {name}''' )
            else:
                raise ValueError("Unknown pruning method" )

    if target_model_path is None:
        __A = os.path.join(
            os.path.dirname(a_ ) , F'''bertarized_{os.path.basename(a_ )}''' )

    if not os.path.isdir(a_ ):
        shutil.copytree(a_ , a_ )
        print(F'''\nCreated folder {target_model_path}''' )

    torch.save(a_ , os.path.join(a_ , "pytorch_model.bin" ) )
    print("\nPruned model saved! See you later!" )


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE :Tuple = argparse.ArgumentParser()
    parser.add_argument(
        '--pruning_method',
        choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
        type=str,
        required=True,
        help=(
            'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
            ' sigmoied_threshold = Soft movement pruning)'
        ),
    )
    parser.add_argument(
        '--threshold',
        type=float,
        required=False,
        help=(
            'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
            'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
            'Not needed for `l0`'
        ),
    )
    parser.add_argument(
        '--model_name_or_path',
        type=str,
        required=True,
        help='Folder containing the model that was previously fine-pruned',
    )
    parser.add_argument(
        '--target_model_path',
        default=None,
        type=str,
        required=False,
        help='Folder containing the model that was previously fine-pruned',
    )
    SCREAMING_SNAKE_CASE :str = parser.parse_args()

    main(args)
15
0
def __UpperCamelCase ( lowercase__ : Optional[Any] , lowercase__ : Tuple ) -> str:
    '''simple docstring'''
    # Divide-and-conquer exponentiation: computes a**b by repeated squaring
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(__lowerCAmelCase , int(b / 2 ) ) * actual_power(__lowerCAmelCase , int(b / 2 ) )
    else:
        return a * actual_power(__lowerCAmelCase , int(b / 2 ) ) * actual_power(__lowerCAmelCase , int(b / 2 ) )


def __UpperCamelCase ( lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] ) -> float:
    '''simple docstring'''
    # Negative exponents are handled by inverting the positive-exponent result
    if b < 0:
        return 1 / actual_power(__lowerCAmelCase , __lowerCAmelCase )
    return actual_power(__lowerCAmelCase , __lowerCAmelCase )


if __name__ == "__main__":
    print(power(-2, -3))
361
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


__UpperCAmelCase = logging.get_logger(__name__)


class __a ( __UpperCamelCase ):
    def __init__( self : Union[str, Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Dict ):
        warnings.warn(
            """The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use GLPNImageProcessor instead.""" , UpperCAmelCase , )
        super().__init__(*UpperCAmelCase , **UpperCAmelCase )
28
0