Dataset schema:
    code                     string   (length 87 to 55.2k)
    code_codestyle           int64    (0 to 349)
    style_context            string   (length 135 to 49.1k)
    style_context_codestyle  int64    (0 to 349)
    label                    int64    (0 to 1)
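Given that schema, here is a minimal sketch of loading and inspecting the rows with the `datasets` library; the dataset identifier below is a hypothetical placeholder, since the real one is not named in this dump:

```python
from datasets import load_dataset

# "user/code-style-pairs" is a hypothetical placeholder identifier.
ds = load_dataset("user/code-style-pairs", split="train")
print(ds.features)  # code, code_codestyle, style_context, style_context_codestyle, label
row = ds[0]
print(len(row["code"]), row["code_codestyle"], row["label"])
```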
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
code_codestyle: 312
def base16_encode(data: bytes) -> str:
    """Encode bytes as an uppercase Base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase Base16 string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 312
label: 1
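The codec above is plain RFC 3548 Base16. A round-trip sanity check against the standard library's `base64.b16encode`/`b16decode`:

```python
import base64

data = b"Hello World!"
encoded = base64.b16encode(data).decode("ascii")  # '48656C6C6F20576F726C6421'
assert base64.b16decode(encoded) == data

# The hand-rolled encoder above agrees byte-for-byte:
manual = "".join(hex(byte)[2:].zfill(2).upper() for byte in data)
assert manual == encoded
```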
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets __a :str = datasets.logging.get_logger(__name__) __a :Optional[Any] = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' __a :Optional[int] = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' __a :Optional[Any] = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Any=False ,__UpperCamelCase : List[Any]=False ,__UpperCamelCase : Dict=True ,__UpperCamelCase : Union[str, Any]=False ,__UpperCamelCase : int="dummy_doc" ): """simple docstring""" A_ = {doc: key_lines} A_ = {doc: sys_lines} A_ = {} A_ = 0 A_ = 0 A_ = 0 A_ = 0 A_ = 0 A_ = 0 A_ , A_ = reader.get_doc_mentions(__UpperCamelCase ,key_doc_lines[doc] ,__UpperCamelCase ) key_singletons_num += singletons_num if NP_only or min_span: A_ = reader.set_annotated_parse_trees(__UpperCamelCase ,key_doc_lines[doc] ,__UpperCamelCase ,__UpperCamelCase ) A_ , A_ = reader.get_doc_mentions(__UpperCamelCase ,sys_doc_lines[doc] ,__UpperCamelCase ) sys_singletons_num += singletons_num if NP_only or min_span: A_ = reader.set_annotated_parse_trees(__UpperCamelCase ,key_doc_lines[doc] ,__UpperCamelCase ,__UpperCamelCase ) if remove_nested: A_ , A_ = reader.remove_nested_coref_mentions(__UpperCamelCase ,__UpperCamelCase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters A_ , A_ = reader.remove_nested_coref_mentions(__UpperCamelCase ,__UpperCamelCase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters A_ = reader.get_mention_assignments(__UpperCamelCase ,__UpperCamelCase ) A_ = reader.get_mention_assignments(__UpperCamelCase ,__UpperCamelCase ) A_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( "Number of removed nested coreferring mentions in the key " f'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' ) logger.info( "Number of resulting singleton clusters in the key " f'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' ) if not keep_singletons: logger.info( f'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ''' "files, respectively" ) return doc_coref_infos def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : 
int ,__UpperCamelCase : Tuple ,__UpperCamelCase : Tuple ): """simple docstring""" A_ = get_coref_infos(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) A_ = {} A_ = 0 A_ = 0 for name, metric in metrics: A_ , A_ , A_ = evaluator.evaluate_documents(__UpperCamelCase ,__UpperCamelCase ,beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({f'''{name}/recall''': recall, f'''{name}/precision''': precision, f'''{name}/f1''': fa} ) logger.info( name.ljust(10 ) ,f'''Recall: {recall * 100:.2f}''' ,f''' Precision: {precision * 100:.2f}''' ,f''' F1: {fa * 100:.2f}''' ,) if conll_subparts_num == 3: A_ = (conll / 3) * 100 logger.info(f'''CoNLL score: {conll:.2f}''' ) output_scores.update({"conll_score": conll} ) return output_scores def __snake_case ( __UpperCamelCase : Dict ): """simple docstring""" A_ = False for line in key_lines: if not line.startswith("#" ): if len(line.split() ) > 6: A_ = line.split()[5] if not parse_col == "-": A_ = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _a ( datasets.Metric ): """simple docstring""" def __A ( self : Tuple ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" ) ), "references": datasets.Sequence(datasets.Value("string" ) ), } ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[ "https://github.com/ns-moosavi/coval", "https://www.aclweb.org/anthology/P16-1060", "http://www.conll.cemantix.org/2012/data.html", ] , ) def __A ( self : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Dict=True , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : str=False ): A_ = [ ("mentions", evaluator.mentions), ("muc", evaluator.muc), ("bcub", evaluator.b_cubed), ("ceafe", evaluator.ceafe), ("lea", evaluator.lea), ] if min_span: A_ = util.check_gold_parse_annotation(UpperCAmelCase ) if not has_gold_parse: raise NotImplementedError("References should have gold parse annotation to use 'min_span'." ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" A_ = evaluate( key_lines=UpperCAmelCase , sys_lines=UpperCAmelCase , metrics=UpperCAmelCase , NP_only=UpperCAmelCase , remove_nested=UpperCAmelCase , keep_singletons=UpperCAmelCase , min_span=UpperCAmelCase , ) return score
code_codestyle: 312
import cv2
import numpy as np


class HarrisCorner:
    """Harris corner detector over a grayscale image."""

    def __init__(self, k: float, window_size: int):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # Harris sensitivity factor, validated in __init__
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
style_context_codestyle: 312
label: 1
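The detector above scores each pixel with r = det(M) - k*trace(M)^2, where M sums squared gradients over a window. A NumPy-only sketch of that response map on a synthetic image, kept deliberately close to the loop structure above:

```python
import numpy as np

def harris_response(img: np.ndarray, k: float = 0.04, window: int = 3) -> np.ndarray:
    dy, dx = np.gradient(img.astype(float))
    ixx, iyy, ixy = dx * dx, dy * dy, dx * dy
    off = window // 2
    h, w = img.shape
    r = np.zeros_like(img, dtype=float)
    for y in range(off, h - off):
        for x in range(off, w - off):
            wxx = ixx[y - off : y + off + 1, x - off : x + off + 1].sum()
            wyy = iyy[y - off : y + off + 1, x - off : x + off + 1].sum()
            wxy = ixy[y - off : y + off + 1, x - off : x + off + 1].sum()
            det = wxx * wyy - wxy * wxy
            trace = wxx + wyy
            r[y, x] = det - k * trace * trace
    return r

# A white square on black should respond strongly near its four corners.
img = np.zeros((32, 32)); img[8:24, 8:24] = 255.0
resp = harris_response(img)
print(np.unravel_index(resp.argmax(), resp.shape))
```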
import mpmath # for roots of unity import numpy as np class _a : """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase : Any=None , UpperCAmelCase : Dict=None ): # Input as list A_ = list(poly_a or [0] )[:] A_ = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() A_ = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() A_ = len(self.polyB ) # Add 0 to make lengths equal a power of 2 A_ = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform A_ = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product A_ = self.__multiply() def __A ( self : Union[str, Any] , UpperCAmelCase : Optional[Any] ): A_ = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB] # Corner case if len(UpperCAmelCase ) <= 1: return dft[0] # A_ = self.c_max_length // 2 while next_ncol > 0: A_ = [[] for i in range(UpperCAmelCase )] A_ = self.root**next_ncol # First half of next step A_ = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(UpperCAmelCase ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step A_ = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(UpperCAmelCase ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update A_ = new_dft A_ = next_ncol // 2 return dft[0] def __A ( self : Dict ): A_ = self.__dft("A" ) A_ = self.__dft("B" ) A_ = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT A_ = 2 while next_ncol <= self.c_max_length: A_ = [[] for i in range(UpperCAmelCase )] A_ = self.root ** (next_ncol // 2) A_ = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update A_ = new_inverse_c next_ncol *= 2 # Unpack A_ = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self : Union[str, Any] ): A_ = "A = " + " + ".join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) ) A_ = "B = " + " + ".join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) ) A_ = "A*B = " + " + ".join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) ) return f'''{a}\n{b}\n{c}''' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 312
def solution(n: int = 1000) -> int:
    """Sum 2*a*floor((a - 1)/2) over a = 3..n."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
style_context_codestyle: 312
label: 1
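The first blob in the row above multiplies polynomials with a hand-rolled FFT over complex roots of unity. As a quick cross-check of the same idea, NumPy's FFT gives the product coefficients directly (function name illustrative):

```python
import numpy as np

def poly_multiply(a, b):
    """Multiply two polynomials given as coefficient lists, lowest degree first."""
    n = len(a) + len(b) - 1          # degree of the product plus one
    fa = np.fft.fft(a, n)            # zero-padded forward transforms
    fb = np.fft.fft(b, n)
    return np.round(np.fft.ifft(fa * fb).real, 8).tolist()

# (1 + 2x)(1 + 3x) = 1 + 5x + 6x^2
print(poly_multiply([1, 2], [1, 3]))         # [1.0, 5.0, 6.0]
print(np.convolve([1, 2], [1, 3]).tolist())  # [1, 5, 6] -- same product
```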
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class ImageNormalizer(ModelMixin, ConfigMixin):
    """Normalizes embeddings with a learned mean and std, and inverts the mapping."""

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        return (embeds - self.mean) * 1.0 / self.std

    def unscale(self, embeds):
        return (embeds * self.std) + self.mean
code_codestyle: 312
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Union[str, Any] = ['image_processor', 'tokenizer'] _lowerCamelCase : Tuple = 'OwlViTImageProcessor' _lowerCamelCase : List[Any] = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self : Optional[Any] , UpperCAmelCase : int=None , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : Any ): A_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCAmelCase , ) A_ = kwargs.pop("feature_extractor" ) A_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(UpperCAmelCase , UpperCAmelCase ) def __call__( self : Optional[int] , UpperCAmelCase : List[str]=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Dict="max_length" , UpperCAmelCase : Optional[Any]="np" , **UpperCAmelCase : Optional[int] ): if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." ) if text is not None: if isinstance(UpperCAmelCase , UpperCAmelCase ) or (isinstance(UpperCAmelCase , UpperCAmelCase ) and not isinstance(text[0] , UpperCAmelCase )): A_ = [self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )] elif isinstance(UpperCAmelCase , UpperCAmelCase ) and isinstance(text[0] , UpperCAmelCase ): A_ = [] # Maximum number of queries across batch A_ = max([len(UpperCAmelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(UpperCAmelCase ) != max_num_queries: A_ = t + [" "] * (max_num_queries - len(UpperCAmelCase )) A_ = self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ) encodings.append(UpperCAmelCase ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": A_ = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) A_ = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp A_ = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) A_ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch A_ = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 ) A_ = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf A_ = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 ) A_ = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 ) else: raise ValueError("Target return tensor type could not be returned" ) A_ = BatchEncoding() A_ = input_ids A_ = attention_mask if query_images is not None: A_ = BatchEncoding() A_ = self.image_processor( 
UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ).pixel_values A_ = query_pixel_values if images is not None: A_ = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ) if text is not None and images is not None: A_ = image_features.pixel_values return encoding elif query_images is not None and images is not None: A_ = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase ) def __A ( self : Optional[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : List[Any] ): return self.image_processor.post_process(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : str , *UpperCAmelCase : str , **UpperCAmelCase : Union[str, Any] ): return self.image_processor.post_process_object_detection(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : List[Any] , *UpperCAmelCase : int , **UpperCAmelCase : int ): return self.image_processor.post_process_image_guided_detection(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : List[Any] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Any ): return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : Tuple , *UpperCAmelCase : Dict , **UpperCAmelCase : str ): return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase ) @property def __A ( self : Union[str, Any] ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase , ) return self.image_processor_class @property def __A ( self : Optional[Any] ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase , ) return self.image_processor
style_context_codestyle: 312
label: 1
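The normalizer above stores a learned mean and std and applies (x - mean) / std plus its inverse. A torch-free round-trip sketch of that arithmetic, assuming the same (1, 768) parameter shapes:

```python
import numpy as np

mean = np.zeros((1, 768))
std = np.ones((1, 768))

def scale(embeds):   return (embeds - mean) / std
def unscale(embeds): return embeds * std + mean

x = np.random.randn(4, 768)
assert np.allclose(unscale(scale(x)), x)  # scale and unscale are exact inverses
```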
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
code_codestyle: 312
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class ImageNormalizer(ModelMixin, ConfigMixin):
    """Normalizes embeddings with a learned mean and std, and inverts the mapping."""

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        return (embeds - self.mean) * 1.0 / self.std

    def unscale(self, embeds):
        return (embeds * self.std) + self.mean
style_context_codestyle: 312
label: 1
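GLPNConfig above follows the standard PretrainedConfig recipe: declare a model_type, accept hyperparameters in __init__, store them as attributes, and forward the rest (token ids and so on) to the base class. A minimal sketch of the same recipe with hypothetical fields, not a real model:

```python
from transformers import PretrainedConfig

class TinyConfig(PretrainedConfig):
    model_type = "tiny"  # hypothetical model type

    def __init__(self, hidden_size: int = 64, num_layers: int = 2, **kwargs):
        super().__init__(**kwargs)   # handles shared fields like token ids
        self.hidden_size = hidden_size
        self.num_layers = num_layers

cfg = TinyConfig(num_layers=4)
print(cfg.num_layers, cfg.model_type)  # 4 tiny
```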
import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch __a :int = True except ImportError: __a :Optional[Any] = False try: from torch.hub import _get_torch_home __a :Optional[Any] = _get_torch_home() except ImportError: __a :Tuple = os.path.expanduser( os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')) ) __a :Optional[Any] = os.path.join(torch_cache_home, 'transformers') __a :int = 'https://cdn.huggingface.co' __a :Any = 'https://s3.amazonaws.com/models.huggingface.co/bert' __a :Optional[Any] = '/'.join(str(Path(__file__).resolve()).split('/')[:-1]) __a :str = os.path.join(PATH, 'config.yaml') __a :str = os.path.join(PATH, 'attributes.txt') __a :Optional[Any] = os.path.join(PATH, 'objects.txt') __a :Optional[int] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path) __a :Dict = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE) __a :List[Any] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE) __a :List[str] = 'pytorch_model.bin' __a :Tuple = 'config.yaml' def __snake_case ( __UpperCamelCase : Optional[Any]=OBJECTS ,__UpperCamelCase : List[str]=ATTRIBUTES ): """simple docstring""" A_ = [] with open(__UpperCamelCase ) as f: for object in f.readlines(): vg_classes.append(object.split("," )[0].lower().strip() ) A_ = [] with open(__UpperCamelCase ) as f: for object in f.readlines(): vg_attrs.append(object.split("," )[0].lower().strip() ) return vg_classes, vg_attrs def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" A_ = OrderedDict() with open(__UpperCamelCase ,"rb" ) as f: A_ = pkl.load(__UpperCamelCase )["model"] for k in copy.deepcopy(list(ckp.keys() ) ): A_ = ckp.pop(__UpperCamelCase ) if isinstance(__UpperCamelCase ,np.ndarray ): A_ = torch.tensor(__UpperCamelCase ) else: assert isinstance(__UpperCamelCase ,torch.tensor ), type(__UpperCamelCase ) A_ = v return r class _a : """simple docstring""" _lowerCamelCase : Union[str, Any] = {} def __init__( self : str , UpperCAmelCase : dict , UpperCAmelCase : str = "root" , UpperCAmelCase : List[str]=0 ): A_ = name A_ = level A_ = {} for k, v in dictionary.items(): if v is None: raise ValueError() A_ = copy.deepcopy(UpperCAmelCase ) A_ = copy.deepcopy(UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = Config(UpperCAmelCase , name=UpperCAmelCase , level=level + 1 ) A_ = v setattr(self , UpperCAmelCase , UpperCAmelCase ) A_ = d def __repr__( self : Optional[Any] ): return str(list((self._pointer.keys()) ) ) def __setattr__( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Any ): A_ = val A_ = val A_ = key.split("." 
) A_ = len(UpperCAmelCase ) - 1 A_ = self._pointer if len(UpperCAmelCase ) > 1: for i, l in enumerate(UpperCAmelCase ): if hasattr(self , UpperCAmelCase ) and isinstance(getattr(self , UpperCAmelCase ) , UpperCAmelCase ): setattr(getattr(self , UpperCAmelCase ) , ".".join(levels[i:] ) , UpperCAmelCase ) if l == last_level: A_ = val else: A_ = pointer[l] def __A ( self : List[str] ): return self._pointer def __A ( self : int , UpperCAmelCase : Tuple , UpperCAmelCase : int ): with open(f'''{file_name}''' , "w" ) as stream: dump(UpperCAmelCase , UpperCAmelCase ) def __A ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Tuple ): with open(f'''{file_name}''' , "w" ) as stream: json.dump(UpperCAmelCase , UpperCAmelCase ) @staticmethod def __A ( UpperCAmelCase : Optional[int] ): with open(UpperCAmelCase ) as stream: A_ = load(UpperCAmelCase , Loader=UpperCAmelCase ) return data def __str__( self : str ): A_ = " " if self._name != "root": A_ = f'''{t * (self._level-1)}{self._name}:\n''' else: A_ = "" A_ = self._level for i, (k, v) in enumerate(self._pointer.items() ): if isinstance(UpperCAmelCase , UpperCAmelCase ): r += f'''{t * (self._level)}{v}\n''' self._level += 1 else: r += f'''{t * (self._level)}{k}: {v} ({type(UpperCAmelCase ).__name__})\n''' A_ = level return r[:-1] @classmethod def __A ( cls : Optional[Any] , UpperCAmelCase : str , **UpperCAmelCase : str ): A_ , A_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase ) return cls(UpperCAmelCase ) @classmethod def __A ( cls : int , UpperCAmelCase : str , **UpperCAmelCase : int ): A_ = kwargs.pop("cache_dir" , UpperCAmelCase ) A_ = kwargs.pop("force_download" , UpperCAmelCase ) A_ = kwargs.pop("resume_download" , UpperCAmelCase ) A_ = kwargs.pop("proxies" , UpperCAmelCase ) A_ = kwargs.pop("local_files_only" , UpperCAmelCase ) if os.path.isdir(UpperCAmelCase ): A_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) elif os.path.isfile(UpperCAmelCase ) or is_remote_url(UpperCAmelCase ): A_ = pretrained_model_name_or_path else: A_ = hf_bucket_url(UpperCAmelCase , filename=UpperCAmelCase , use_cdn=UpperCAmelCase ) try: # Load from URL or cache if already cached A_ = cached_path( UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , proxies=UpperCAmelCase , resume_download=UpperCAmelCase , local_files_only=UpperCAmelCase , ) # Load config dict if resolved_config_file is None: raise EnvironmentError A_ = Config.load_yaml(UpperCAmelCase ) except EnvironmentError: A_ = "Can't load config for" raise EnvironmentError(UpperCAmelCase ) if resolved_config_file == config_file: print("loading configuration file from path" ) else: print("loading configuration file cache" ) return Config.load_yaml(UpperCAmelCase ), kwargs def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" A_ = torch.load("dump.pt" ,map_location=in_tensor.device ) A_ = in_tensor.numpy() A_ = out_tensor.numpy()[0] print(na.shape ,na[0, 0, :5] ) print(na.shape ,na[0, 0, :5] ) assert np.allclose(__UpperCamelCase ,__UpperCamelCase ,rtol=0.01 ,atol=0.1 ), ( f'''{sum([1 for x in np.isclose(__UpperCamelCase ,__UpperCamelCase ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %''' " element-wise mismatch" ) raise Exception("tensors are all good" ) # Hugging face functions below def __snake_case ( __UpperCamelCase : Optional[int] ): """simple docstring""" A_ = urlparse(__UpperCamelCase ) return parsed.scheme in ("http", "https") def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase 
: str=True ): """simple docstring""" A_ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX A_ = "/" not in model_id if legacy_format: return f'''{endpoint}/{model_id}-{filename}''' else: return f'''{endpoint}/{model_id}/{filename}''' def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str]=None ,__UpperCamelCase : int=0 ,__UpperCamelCase : int=None ,): """simple docstring""" A_ = "python/{}".format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(__UpperCamelCase ,__UpperCamelCase ): ua += "; " + "; ".join("{}/{}".format(__UpperCamelCase ,__UpperCamelCase ) for k, v in user_agent.items() ) elif isinstance(__UpperCamelCase ,__UpperCamelCase ): ua += "; " + user_agent A_ = {"user-agent": ua} if resume_size > 0: A_ = "bytes=%d-" % (resume_size,) A_ = requests.get(__UpperCamelCase ,stream=__UpperCamelCase ,proxies=__UpperCamelCase ,headers=__UpperCamelCase ) if response.status_code == 416: # Range not satisfiable return A_ = response.headers.get("Content-Length" ) A_ = resume_size + int(__UpperCamelCase ) if content_length is not None else None A_ = tqdm( unit="B" ,unit_scale=__UpperCamelCase ,total=__UpperCamelCase ,initial=__UpperCamelCase ,desc="Downloading" ,) for chunk in response.iter_content(chunk_size=1024 ): if chunk: # filter out keep-alive new chunks progress.update(len(__UpperCamelCase ) ) temp_file.write(__UpperCamelCase ) progress.close() def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Any=None ,__UpperCamelCase : Dict=False ,__UpperCamelCase : Union[str, Any]=None ,__UpperCamelCase : Any=10 ,__UpperCamelCase : int=False ,__UpperCamelCase : Optional[Any]=None ,__UpperCamelCase : str=False ,): """simple docstring""" if cache_dir is None: A_ = TRANSFORMERS_CACHE if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = str(__UpperCamelCase ) os.makedirs(__UpperCamelCase ,exist_ok=__UpperCamelCase ) A_ = None if not local_files_only: try: A_ = requests.head(__UpperCamelCase ,allow_redirects=__UpperCamelCase ,proxies=__UpperCamelCase ,timeout=__UpperCamelCase ) if response.status_code == 200: A_ = response.headers.get("ETag" ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass A_ = url_to_filename(__UpperCamelCase ,__UpperCamelCase ) # get cache path to put the file A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(__UpperCamelCase ): return cache_path else: A_ = [ file for file in fnmatch.filter(os.listdir(__UpperCamelCase ) ,filename + ".*" ) if not file.endswith(".json" ) and not file.endswith(".lock" ) ] if len(__UpperCamelCase ) > 0: return os.path.join(__UpperCamelCase ,matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( "Cannot find the requested files in the cached path and outgoing traffic has been" " disabled. To enable model look-ups and downloads online, set 'local_files_only'" " to False." ) return None # From now on, etag is not None. if os.path.exists(__UpperCamelCase ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. 
A_ = cache_path + ".lock" with FileLock(__UpperCamelCase ): # If the download just completed while the lock was activated. if os.path.exists(__UpperCamelCase ) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: A_ = cache_path + ".incomplete" @contextmanager def _resumable_file_manager(): with open(__UpperCamelCase ,"a+b" ) as f: yield f A_ = _resumable_file_manager if os.path.exists(__UpperCamelCase ): A_ = os.stat(__UpperCamelCase ).st_size else: A_ = 0 else: A_ = partial(tempfile.NamedTemporaryFile ,dir=__UpperCamelCase ,delete=__UpperCamelCase ) A_ = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: print( "%s not found in cache or force_download set to True, downloading to %s" ,__UpperCamelCase ,temp_file.name ,) http_get( __UpperCamelCase ,__UpperCamelCase ,proxies=__UpperCamelCase ,resume_size=__UpperCamelCase ,user_agent=__UpperCamelCase ,) os.replace(temp_file.name ,__UpperCamelCase ) A_ = {"url": url, "etag": etag} A_ = cache_path + ".json" with open(__UpperCamelCase ,"w" ) as meta_file: json.dump(__UpperCamelCase ,__UpperCamelCase ) return cache_path def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : str=None ): """simple docstring""" A_ = url.encode("utf-8" ) A_ = shaaaa(__UpperCamelCase ) A_ = url_hash.hexdigest() if etag: A_ = etag.encode("utf-8" ) A_ = shaaaa(__UpperCamelCase ) filename += "." + etag_hash.hexdigest() if url.endswith(".h5" ): filename += ".h5" return filename def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Union[str, Any]=None ,__UpperCamelCase : List[Any]=False ,__UpperCamelCase : List[str]=None ,__UpperCamelCase : Any=False ,__UpperCamelCase : Optional[int]=None ,__UpperCamelCase : Optional[Any]=False ,__UpperCamelCase : Dict=False ,__UpperCamelCase : Optional[Any]=False ,): """simple docstring""" if cache_dir is None: A_ = TRANSFORMERS_CACHE if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = str(__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = str(__UpperCamelCase ) if is_remote_url(__UpperCamelCase ): # URL, so get it from the cache (downloading if necessary) A_ = get_from_cache( __UpperCamelCase ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,user_agent=__UpperCamelCase ,local_files_only=__UpperCamelCase ,) elif os.path.exists(__UpperCamelCase ): # File, and it exists. A_ = url_or_filename elif urlparse(__UpperCamelCase ).scheme == "": # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(__UpperCamelCase ) ) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(__UpperCamelCase ) ) if extract_compressed_file: if not is_zipfile(__UpperCamelCase ) and not tarfile.is_tarfile(__UpperCamelCase ): return output_path # Path where we extract compressed archives # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" A_ , A_ = os.path.split(__UpperCamelCase ) A_ = output_file.replace("." 
,"-" ) + "-extracted" A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ) if os.path.isdir(__UpperCamelCase ) and os.listdir(__UpperCamelCase ) and not force_extract: return output_path_extracted # Prevent parallel extractions A_ = output_path + ".lock" with FileLock(__UpperCamelCase ): shutil.rmtree(__UpperCamelCase ,ignore_errors=__UpperCamelCase ) os.makedirs(__UpperCamelCase ) if is_zipfile(__UpperCamelCase ): with ZipFile(__UpperCamelCase ,"r" ) as zip_file: zip_file.extractall(__UpperCamelCase ) zip_file.close() elif tarfile.is_tarfile(__UpperCamelCase ): A_ = tarfile.open(__UpperCamelCase ) tar_file.extractall(__UpperCamelCase ) tar_file.close() else: raise EnvironmentError("Archive format of {} could not be identified".format(__UpperCamelCase ) ) return output_path_extracted return output_path def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Any="," ): """simple docstring""" assert isinstance(__UpperCamelCase ,__UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): with open(__UpperCamelCase ) as f: A_ = eval(f.read() ) else: A_ = requests.get(__UpperCamelCase ) try: A_ = requests.json() except Exception: A_ = req.content.decode() assert data is not None, "could not connect" try: A_ = eval(__UpperCamelCase ) except Exception: A_ = data.split("\n" ) req.close() return data def __snake_case ( __UpperCamelCase : int ): """simple docstring""" A_ = requests.get(__UpperCamelCase ) A_ = np.array(Image.open(BytesIO(response.content ) ) ) return img def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" A_ = url.split("/" )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(__UpperCamelCase ) with open(__UpperCamelCase ,"rb" ) as stream: A_ = pkl.load(__UpperCamelCase ) A_ = weights.pop("model" ) A_ = {} for k, v in model.items(): A_ = torch.from_numpy(__UpperCamelCase ) if "running_var" in k: A_ = torch.tensor([0] ) A_ = k.replace("running_var" ,"num_batches_tracked" ) A_ = zero return new def __snake_case ( ): """simple docstring""" print(f'''{os.path.abspath(os.path.join(__UpperCamelCase ,os.pardir ) )}/demo.ipynb''' ) def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[int]="RGB" ): """simple docstring""" assert isinstance(__UpperCamelCase ,__UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): A_ = cva.imread(__UpperCamelCase ) else: A_ = get_image_from_url(__UpperCamelCase ) assert img is not None, f'''could not connect to: {im}''' A_ = cva.cvtColor(__UpperCamelCase ,cva.COLOR_BGR2RGB ) if input_format == "RGB": A_ = img[:, :, ::-1] return img def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[str]=1 ): """simple docstring""" return (images[i : i + batch] for i in range(0 ,len(__UpperCamelCase ) ,__UpperCamelCase ))
code_codestyle: 312
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve A x = b iteratively, starting from init_val."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise if the augmented matrix is not strictly diagonally dominant."""
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 312
label: 1
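The same iteration as the solver above, vectorized: split A into its diagonal D and remainder R, then repeat x <- D^-1 (b - R x). A self-contained check on a strictly diagonally dominant 3x3 system:

```python
import numpy as np

A = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
b = np.array([5.0, 9.0, 1.0])   # exact solution: [1, 2, -1]
x = np.zeros(3)
D = np.diag(A)                  # Jacobi splits A into diagonal and remainder
R = A - np.diag(D)
for _ in range(50):
    x = (b - R @ x) / D         # x_{k+1} = D^-1 (b - R x_k)
print(x.round(6))               # ~ [ 1.  2. -1.]
```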
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """True if n can be placed at (row, column) without a conflict."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
code_codestyle: 312
from unittest import TestCase

from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
style_context_codestyle: 312
label: 1
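The solver above backtracks: find an empty cell, try each digit that passes is_safe, recurse, and reset on failure. The box test in is_safe relies on row - row % 3 snapping to the top-left corner of the 3x3 box, which a quick check confirms:

```python
# Top-left corner of the 3x3 box containing (row, column):
for row, column in [(0, 0), (4, 7), (8, 2)]:
    print((row, column), "->", (row - row % 3, column - column % 3))
# (0, 0) -> (0, 0)   (4, 7) -> (3, 6)   (8, 2) -> (6, 0)
```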
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """Convert speed between km/h, m/s, mph and knot, going through km/h."""
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 312
import os
from typing import Dict, List, Tuple, TypeVar, Union

T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
style_context_codestyle: 312
label: 1
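Conversion in the chart above goes through km/h as a hub unit: multiply by speed_chart[unit_from] to get into km/h, then by speed_chart_inverse[unit_to] to get out. For example, 100 mph in knots:

```python
speed_chart = {"km/h": 1.0, "m/s": 3.6, "mph": 1.609344, "knot": 1.852}
speed_chart_inverse = {"km/h": 1.0, "m/s": 0.277777778, "mph": 0.621371192, "knot": 0.539956803}

# 100 mph -> km/h -> knot
print(round(100 * speed_chart["mph"] * speed_chart_inverse["knot"], 3))  # 86.898
```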
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : Optional[int] ): A_ = tempfile.mkdtemp() # fmt: off A_ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on A_ = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) ) A_ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] A_ = {"unk_token": "<unk>"} A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(UpperCAmelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(UpperCAmelCase ) ) A_ = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073], "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711], } A_ = os.path.join(self.tmpdirname , UpperCAmelCase ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(UpperCAmelCase , UpperCAmelCase ) def __A ( self : int , **UpperCAmelCase : str ): return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def __A ( self : Optional[Any] , **UpperCAmelCase : Optional[int] ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def __A ( self : Optional[Any] , **UpperCAmelCase : Tuple ): return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def __A ( self : Any ): shutil.rmtree(self.tmpdirname ) def __A ( self : Tuple ): A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : str ): A_ = self.get_tokenizer() A_ = self.get_rust_tokenizer() A_ = self.get_image_processor() A_ = CLIPProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) A_ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase ) A_ = CLIPProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) A_ = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase ) 
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase ) def __A ( self : int ): A_ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) A_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 ) A_ = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase ) def __A ( self : Tuple ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = CLIPProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = self.prepare_image_inputs() A_ = image_processor(UpperCAmelCase , return_tensors="np" ) A_ = processor(images=UpperCAmelCase , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __A ( self : Any ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = CLIPProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = processor(text=UpperCAmelCase ) A_ = tokenizer(UpperCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : Tuple ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = CLIPProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = self.prepare_image_inputs() A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase ): processor() def __A ( self : int ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = CLIPProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ = processor.batch_decode(UpperCAmelCase ) A_ = tokenizer.batch_decode(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : List[str] ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = CLIPProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = self.prepare_image_inputs() A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
__a :Dict = '0.18.2' from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, 
LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, 
FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
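# Editorial sketch of the guarded-import pattern used throughout the
# __init__ above: probe for an optional backend and only re-export the
# symbols that can actually be imported. "example_backend" and
# ExamplePipeline are placeholders, not real diffusers internals; diffusers
# itself raises OptionalDependencyNotAvailable and falls back to dummy
# objects that raise a helpful error on first use.
import importlib.util


def is_example_backend_available() -> bool:
    # Mirrors the is_*_available() helpers: True if the package resolves.
    return importlib.util.find_spec("example_backend") is not None


try:
    if not is_example_backend_available():
        raise ImportError("example_backend is not installed")
except ImportError:
    ExamplePipeline = None  # stand-in for the dummy-object fallback
else:
    from example_backend import ExamplePipeline  # noqa: F401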
import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger __a :Dict = get_logger(__name__) __a :Union[str, Any] = Path(__file__).parent / 'model_card_template.md' __a :Tuple = uuida().hex __a :List[Any] = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES __a :Union[str, Any] = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES __a :Tuple = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/' def __snake_case ( __UpperCamelCase : Union[Dict, str, None] = None ): """simple docstring""" A_ = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}''' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'''; torch/{_torch_version}''' if is_flax_available(): ua += f'''; jax/{_jax_version}''' ua += f'''; flax/{_flax_version}''' if is_onnx_available(): ua += f'''; onnxruntime/{_onnxruntime_version}''' # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" ,"" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(__UpperCamelCase ,__UpperCamelCase ): ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items() ) elif isinstance(__UpperCamelCase ,__UpperCamelCase ): ua += "; " + user_agent return ua def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[str] = None ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if token is None: A_ = HfFolder.get_token() if organization is None: A_ = whoami(__UpperCamelCase )["name"] return f'''{username}/{model_id}''' else: return f'''{organization}/{model_id}''' def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Union[str, Any] ): """simple docstring""" if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." 
) if hasattr(__UpperCamelCase ,"local_rank" ) and args.local_rank not in [-1, 0]: return A_ = args.hub_token if hasattr(__UpperCamelCase ,"hub_token" ) else None A_ = get_full_repo_name(__UpperCamelCase ,token=__UpperCamelCase ) A_ = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" ,license="apache-2.0" ,library_name="diffusers" ,tags=[] ,datasets=args.dataset_name ,metrics=[] ,) ,template_path=__UpperCamelCase ,model_name=__UpperCamelCase ,repo_name=__UpperCamelCase ,dataset_name=args.dataset_name if hasattr(__UpperCamelCase ,"dataset_name" ) else None ,learning_rate=args.learning_rate ,train_batch_size=args.train_batch_size ,eval_batch_size=args.eval_batch_size ,gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(__UpperCamelCase ,"gradient_accumulation_steps" ) else None ) ,adam_betaa=args.adam_betaa if hasattr(__UpperCamelCase ,"adam_beta1" ) else None ,adam_betaa=args.adam_betaa if hasattr(__UpperCamelCase ,"adam_beta2" ) else None ,adam_weight_decay=args.adam_weight_decay if hasattr(__UpperCamelCase ,"adam_weight_decay" ) else None ,adam_epsilon=args.adam_epsilon if hasattr(__UpperCamelCase ,"adam_epsilon" ) else None ,lr_scheduler=args.lr_scheduler if hasattr(__UpperCamelCase ,"lr_scheduler" ) else None ,lr_warmup_steps=args.lr_warmup_steps if hasattr(__UpperCamelCase ,"lr_warmup_steps" ) else None ,ema_inv_gamma=args.ema_inv_gamma if hasattr(__UpperCamelCase ,"ema_inv_gamma" ) else None ,ema_power=args.ema_power if hasattr(__UpperCamelCase ,"ema_power" ) else None ,ema_max_decay=args.ema_max_decay if hasattr(__UpperCamelCase ,"ema_max_decay" ) else None ,mixed_precision=args.mixed_precision ,) A_ = os.path.join(args.output_dir ,"README.md" ) model_card.save(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Optional[str] ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if resolved_file is None or commit_hash is not None: return commit_hash A_ = str(Path(__UpperCamelCase ).as_posix() ) A_ = re.search(R"snapshots/([^/]+)/" ,__UpperCamelCase ) if search is None: return None A_ = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(__UpperCamelCase ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. __a :str = os.path.expanduser( os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface')) ) __a :List[Any] = os.path.join(hf_cache_home, 'diffusers') def __snake_case ( __UpperCamelCase : Optional[str] = None ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if new_cache_dir is None: A_ = DIFFUSERS_CACHE if old_cache_dir is None: A_ = old_diffusers_cache A_ = Path(__UpperCamelCase ).expanduser() A_ = Path(__UpperCamelCase ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): A_ = new_cache_dir / old_blob_path.relative_to(__UpperCamelCase ) new_blob_path.parent.mkdir(parents=__UpperCamelCase ,exist_ok=__UpperCamelCase ) os.replace(__UpperCamelCase ,__UpperCamelCase ) try: os.symlink(__UpperCamelCase ,__UpperCamelCase ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. 
If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). __a :Dict = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt') if not os.path.isfile(cache_version_file): __a :Optional[int] = 0 else: with open(cache_version_file) as f: try: __a :Dict = int(f.read()) except ValueError: __a :str = 0 if cache_version < 1: __a :Optional[Any] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( 'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your ' 'existing cached models. This is a one-time operation, you can interrupt it or run it ' 'later by calling `diffusers.utils.hub_utils.move_cache()`.' ) try: move_cache() except Exception as e: __a :Optional[Any] = '\n'.join(traceback.format_tb(e.__traceback__)) logger.error( F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease " 'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole ' 'message and we will do our best to help.' ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, 'w') as f: f.write('1') except Exception: logger.warning( F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure " 'the directory exists and can be written to.' ) def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if variant is not None: A_ = weights_name.split("." ) A_ = splits[:-1] + [variant] + splits[-1:] A_ = ".".join(__UpperCamelCase ) return weights_name def __snake_case ( __UpperCamelCase : Optional[Any] ,*, __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Any ,__UpperCamelCase : Tuple ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : str ,__UpperCamelCase : int ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : int ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Optional[int]=None ,): """simple docstring""" A_ = str(__UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): return pretrained_model_name_or_path elif os.path.isdir(__UpperCamelCase ): if os.path.isfile(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ): # Load from a PyTorch checkpoint A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ): A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) return model_file else: raise EnvironmentError( f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' ) else: # 1. 
First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(__UpperCamelCase ).base_version ) >= version.parse("0.20.0" ) ): try: A_ = hf_hub_download( __UpperCamelCase ,filename=_add_variant(__UpperCamelCase ,__UpperCamelCase ) ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,local_files_only=__UpperCamelCase ,use_auth_token=__UpperCamelCase ,user_agent=__UpperCamelCase ,subfolder=__UpperCamelCase ,revision=revision or commit_hash ,) warnings.warn( f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' ,__UpperCamelCase ,) return model_file except: # noqa: E722 warnings.warn( f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__UpperCamelCase ,__UpperCamelCase )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(__UpperCamelCase ,__UpperCamelCase )}\' so that the correct variant file can be added.''' ,__UpperCamelCase ,) try: # 2. Load model file as usual A_ = hf_hub_download( __UpperCamelCase ,filename=__UpperCamelCase ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,local_files_only=__UpperCamelCase ,use_auth_token=__UpperCamelCase ,user_agent=__UpperCamelCase ,subfolder=__UpperCamelCase ,revision=revision or commit_hash ,) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ''' "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ''' "this model name. 
Check the model page at " f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' ) except EntryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' ) except HTTPError as err: raise EnvironmentError( f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' ) except ValueError: raise EnvironmentError( f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it''' f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a''' f''' directory containing a file named {weights_name} or''' " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ''' "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ''' f'''containing a file named {weights_name}''' )
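# Editorial restatement of the variant-file naming implemented by
# _add_variant above (pure string manipulation, no Hub access): the variant
# tag is inserted just before the file extension.
def add_variant(weights_name, variant=None):
    if variant is not None:
        splits = weights_name.split(".")
        weights_name = ".".join(splits[:-1] + [variant] + splits[-1:])
    return weights_name


assert add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
assert add_variant("model.safetensors") == "model.safetensors"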
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below ``n`` (Project Euler problem 1)."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
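# Editorial aside: the generator expression above is O(n); inclusion-exclusion
# over the two arithmetic series gives the same answer in O(1): add the
# multiples of 3 and of 5, then subtract the multiples of 15 counted twice.
def solution_closed_form(n: int = 1000) -> int:
    def series_sum(k: int) -> int:
        m = (n - 1) // k  # number of positive multiples of k strictly below n
        return k * m * (m + 1) // 2

    return series_sum(3) + series_sum(5) - series_sum(15)


assert solution_closed_form(1000) == 233168 == solution(1000)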
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch parameter key (and reshape its tensor) to the matching Flax layout."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert the PyTorch tensors to NumPy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
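# Editorial check of the two reshape rules in rename_key_and_reshape_tensor:
# a Linear "weight" of shape (out, in) becomes a transposed Flax "kernel"
# (in, out), and a conv weight (O, I, kH, kW) is permuted to Flax's
# (kH, kW, I, O) layout.
import numpy as np

pt_linear = np.zeros((8, 4))                                  # (out_features, in_features)
assert pt_linear.T.shape == (4, 8)                            # Flax kernel layout
pt_conv = np.zeros((16, 3, 5, 5))                             # (O, I, kH, kW)
assert pt_conv.transpose(2, 3, 1, 0).shape == (5, 5, 3, 16)   # (kH, kW, I, O)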
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class _a : """simple docstring""" @property def __A ( self : Union[str, Any] ): return self.get_dummy_input() @property def __A ( self : int ): if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' ) def __A ( self : Union[str, Any] , UpperCAmelCase : List[Any]=True , UpperCAmelCase : str=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[Any]=False , ): A_ = 4 A_ = 32 A_ = (32, 32) A_ = torch.manual_seed(0 ) A_ = torch.device(UpperCAmelCase ) A_ = (batch_size, num_channels) + sizes A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ) A_ = {"hidden_states": hidden_states} if include_temb: A_ = 128 A_ = randn_tensor((batch_size, temb_channels) , generator=UpperCAmelCase , device=UpperCAmelCase ) if include_res_hidden_states_tuple: A_ = torch.manual_seed(1 ) A_ = (randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ),) if include_encoder_hidden_states: A_ = floats_tensor((batch_size, 32, 32) ).to(UpperCAmelCase ) if include_skip_sample: A_ = randn_tensor(((batch_size, 3) + sizes) , generator=UpperCAmelCase , device=UpperCAmelCase ) return dummy_input def __A ( self : Optional[int] ): A_ = { "in_channels": 32, "out_channels": 32, "temb_channels": 128, } if self.block_type == "up": A_ = 32 if self.block_type == "mid": init_dict.pop("out_channels" ) A_ = self.dummy_input return init_dict, inputs_dict def __A ( self : List[str] , UpperCAmelCase : Optional[Any] ): A_ , A_ = self.prepare_init_args_and_inputs_for_common() A_ = self.block_class(**UpperCAmelCase ) unet_block.to(UpperCAmelCase ) unet_block.eval() with torch.no_grad(): A_ = unet_block(**UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = output[0] self.assertEqual(output.shape , self.output_shape ) A_ = output[0, -1, -3:, -3:] A_ = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase ) assert torch_all_close(output_slice.flatten() , UpperCAmelCase , atol=5E-3 ) @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" ) def __A ( self : Union[str, Any] ): A_ , A_ = self.prepare_init_args_and_inputs_for_common() A_ = self.block_class(**UpperCAmelCase ) model.to(UpperCAmelCase ) model.train() A_ = model(**UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = output[0] A_ = torch.device(UpperCAmelCase ) A_ = randn_tensor(output.shape , device=UpperCAmelCase ) A_ = torch.nn.functional.mse_loss(UpperCAmelCase , UpperCAmelCase ) loss.backward()
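# Editorial sketch of the seeded-randomness pattern the fixture above relies
# on: passing a freshly seeded generator into randn_tensor makes the dummy
# inputs (and hence the output-slice assertions) fully reproducible.
import torch
from diffusers.utils import randn_tensor

generator = torch.manual_seed(0)
first = randn_tensor((4, 32, 32, 32), generator=generator)
generator = torch.manual_seed(0)
second = randn_tensor((4, 32, 32, 32), generator=generator)
assert torch.equal(first, second)  # same seed -> identical dummy input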
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : str ): A_ = tempfile.mkdtemp() # fmt: off A_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"] # fmt: on A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) A_ = { "do_resize": True, "size": {"height": 18, "width": 18}, "do_normalize": True, "image_mean": [0.5, 0.5, 0.5], "image_std": [0.5, 0.5, 0.5], } A_ = os.path.join(self.tmpdirname , UpperCAmelCase ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(UpperCAmelCase , UpperCAmelCase ) def __A ( self : Dict , **UpperCAmelCase : Tuple ): return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def __A ( self : Optional[Any] , **UpperCAmelCase : Dict ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def __A ( self : List[Any] ): shutil.rmtree(self.tmpdirname ) def __A ( self : Optional[int] ): A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : Dict ): A_ = self.get_tokenizer() A_ = self.get_image_processor() A_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) A_ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase ) def __A ( self : str ): A_ = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) A_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 ) A_ = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase ) def __A ( self : List[Any] ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = self.prepare_image_inputs() A_ = image_processor(UpperCAmelCase , return_tensors="np" ) A_ = 
processor(images=UpperCAmelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __A ( self : Optional[Any] ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = processor(text=UpperCAmelCase ) A_ = tokenizer(UpperCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : List[Any] ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = self.prepare_image_inputs() A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with self.assertRaises(UpperCAmelCase ): processor() def __A ( self : Optional[int] ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ = processor.batch_decode(UpperCAmelCase ) A_ = tokenizer.batch_decode(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : List[str] ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = self.prepare_image_inputs() A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
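# Editorial sketch of the save/load round trip these tests assert on:
# save_pretrained writes both sub-components into one directory, and
# from_pretrained restores an equivalent processor. The toy vocabulary below
# is an illustrative assumption.
import os
import tempfile

from transformers import BertTokenizer, ViTImageProcessor, VisionTextDualEncoderProcessor

with tempfile.TemporaryDirectory() as tmp:
    vocab_file = os.path.join(tmp, "vocab.txt")
    with open(vocab_file, "w", encoding="utf-8") as f:
        f.write("\n".join(["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "lower", "newer"]))
    original = VisionTextDualEncoderProcessor(
        tokenizer=BertTokenizer(vocab_file), image_processor=ViTImageProcessor()
    )
    original.save_pretrained(tmp)
    reloaded = VisionTextDualEncoderProcessor.from_pretrained(tmp)
    assert reloaded.tokenizer.get_vocab() == original.tokenizer.get_vocab()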
import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch __a :int = True except ImportError: __a :Optional[Any] = False try: from torch.hub import _get_torch_home __a :Optional[Any] = _get_torch_home() except ImportError: __a :Tuple = os.path.expanduser( os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')) ) __a :Optional[Any] = os.path.join(torch_cache_home, 'transformers') __a :int = 'https://cdn.huggingface.co' __a :Any = 'https://s3.amazonaws.com/models.huggingface.co/bert' __a :Optional[Any] = '/'.join(str(Path(__file__).resolve()).split('/')[:-1]) __a :str = os.path.join(PATH, 'config.yaml') __a :str = os.path.join(PATH, 'attributes.txt') __a :Optional[Any] = os.path.join(PATH, 'objects.txt') __a :Optional[int] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path) __a :Dict = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE) __a :List[Any] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE) __a :List[str] = 'pytorch_model.bin' __a :Tuple = 'config.yaml' def __snake_case ( __UpperCamelCase : Optional[Any]=OBJECTS ,__UpperCamelCase : List[str]=ATTRIBUTES ): """simple docstring""" A_ = [] with open(__UpperCamelCase ) as f: for object in f.readlines(): vg_classes.append(object.split("," )[0].lower().strip() ) A_ = [] with open(__UpperCamelCase ) as f: for object in f.readlines(): vg_attrs.append(object.split("," )[0].lower().strip() ) return vg_classes, vg_attrs def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" A_ = OrderedDict() with open(__UpperCamelCase ,"rb" ) as f: A_ = pkl.load(__UpperCamelCase )["model"] for k in copy.deepcopy(list(ckp.keys() ) ): A_ = ckp.pop(__UpperCamelCase ) if isinstance(__UpperCamelCase ,np.ndarray ): A_ = torch.tensor(__UpperCamelCase ) else: assert isinstance(__UpperCamelCase ,torch.tensor ), type(__UpperCamelCase ) A_ = v return r class _a : """simple docstring""" _lowerCamelCase : Union[str, Any] = {} def __init__( self : str , UpperCAmelCase : dict , UpperCAmelCase : str = "root" , UpperCAmelCase : List[str]=0 ): A_ = name A_ = level A_ = {} for k, v in dictionary.items(): if v is None: raise ValueError() A_ = copy.deepcopy(UpperCAmelCase ) A_ = copy.deepcopy(UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = Config(UpperCAmelCase , name=UpperCAmelCase , level=level + 1 ) A_ = v setattr(self , UpperCAmelCase , UpperCAmelCase ) A_ = d def __repr__( self : Optional[Any] ): return str(list((self._pointer.keys()) ) ) def __setattr__( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Any ): A_ = val A_ = val A_ = key.split("." 
) A_ = len(UpperCAmelCase ) - 1 A_ = self._pointer if len(UpperCAmelCase ) > 1: for i, l in enumerate(UpperCAmelCase ): if hasattr(self , UpperCAmelCase ) and isinstance(getattr(self , UpperCAmelCase ) , UpperCAmelCase ): setattr(getattr(self , UpperCAmelCase ) , ".".join(levels[i:] ) , UpperCAmelCase ) if l == last_level: A_ = val else: A_ = pointer[l] def __A ( self : List[str] ): return self._pointer def __A ( self : int , UpperCAmelCase : Tuple , UpperCAmelCase : int ): with open(f'''{file_name}''' , "w" ) as stream: dump(UpperCAmelCase , UpperCAmelCase ) def __A ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Tuple ): with open(f'''{file_name}''' , "w" ) as stream: json.dump(UpperCAmelCase , UpperCAmelCase ) @staticmethod def __A ( UpperCAmelCase : Optional[int] ): with open(UpperCAmelCase ) as stream: A_ = load(UpperCAmelCase , Loader=UpperCAmelCase ) return data def __str__( self : str ): A_ = " " if self._name != "root": A_ = f'''{t * (self._level-1)}{self._name}:\n''' else: A_ = "" A_ = self._level for i, (k, v) in enumerate(self._pointer.items() ): if isinstance(UpperCAmelCase , UpperCAmelCase ): r += f'''{t * (self._level)}{v}\n''' self._level += 1 else: r += f'''{t * (self._level)}{k}: {v} ({type(UpperCAmelCase ).__name__})\n''' A_ = level return r[:-1] @classmethod def __A ( cls : Optional[Any] , UpperCAmelCase : str , **UpperCAmelCase : str ): A_ , A_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase ) return cls(UpperCAmelCase ) @classmethod def __A ( cls : int , UpperCAmelCase : str , **UpperCAmelCase : int ): A_ = kwargs.pop("cache_dir" , UpperCAmelCase ) A_ = kwargs.pop("force_download" , UpperCAmelCase ) A_ = kwargs.pop("resume_download" , UpperCAmelCase ) A_ = kwargs.pop("proxies" , UpperCAmelCase ) A_ = kwargs.pop("local_files_only" , UpperCAmelCase ) if os.path.isdir(UpperCAmelCase ): A_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) elif os.path.isfile(UpperCAmelCase ) or is_remote_url(UpperCAmelCase ): A_ = pretrained_model_name_or_path else: A_ = hf_bucket_url(UpperCAmelCase , filename=UpperCAmelCase , use_cdn=UpperCAmelCase ) try: # Load from URL or cache if already cached A_ = cached_path( UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , proxies=UpperCAmelCase , resume_download=UpperCAmelCase , local_files_only=UpperCAmelCase , ) # Load config dict if resolved_config_file is None: raise EnvironmentError A_ = Config.load_yaml(UpperCAmelCase ) except EnvironmentError: A_ = "Can't load config for" raise EnvironmentError(UpperCAmelCase ) if resolved_config_file == config_file: print("loading configuration file from path" ) else: print("loading configuration file cache" ) return Config.load_yaml(UpperCAmelCase ), kwargs def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" A_ = torch.load("dump.pt" ,map_location=in_tensor.device ) A_ = in_tensor.numpy() A_ = out_tensor.numpy()[0] print(na.shape ,na[0, 0, :5] ) print(na.shape ,na[0, 0, :5] ) assert np.allclose(__UpperCamelCase ,__UpperCamelCase ,rtol=0.01 ,atol=0.1 ), ( f'''{sum([1 for x in np.isclose(__UpperCamelCase ,__UpperCamelCase ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %''' " element-wise mismatch" ) raise Exception("tensors are all good" ) # Hugging face functions below def __snake_case ( __UpperCamelCase : Optional[int] ): """simple docstring""" A_ = urlparse(__UpperCamelCase ) return parsed.scheme in ("http", "https") def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase 
: str=True ): """simple docstring""" A_ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX A_ = "/" not in model_id if legacy_format: return f'''{endpoint}/{model_id}-{filename}''' else: return f'''{endpoint}/{model_id}/{filename}''' def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str]=None ,__UpperCamelCase : int=0 ,__UpperCamelCase : int=None ,): """simple docstring""" A_ = "python/{}".format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(__UpperCamelCase ,__UpperCamelCase ): ua += "; " + "; ".join("{}/{}".format(__UpperCamelCase ,__UpperCamelCase ) for k, v in user_agent.items() ) elif isinstance(__UpperCamelCase ,__UpperCamelCase ): ua += "; " + user_agent A_ = {"user-agent": ua} if resume_size > 0: A_ = "bytes=%d-" % (resume_size,) A_ = requests.get(__UpperCamelCase ,stream=__UpperCamelCase ,proxies=__UpperCamelCase ,headers=__UpperCamelCase ) if response.status_code == 416: # Range not satisfiable return A_ = response.headers.get("Content-Length" ) A_ = resume_size + int(__UpperCamelCase ) if content_length is not None else None A_ = tqdm( unit="B" ,unit_scale=__UpperCamelCase ,total=__UpperCamelCase ,initial=__UpperCamelCase ,desc="Downloading" ,) for chunk in response.iter_content(chunk_size=1024 ): if chunk: # filter out keep-alive new chunks progress.update(len(__UpperCamelCase ) ) temp_file.write(__UpperCamelCase ) progress.close() def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Any=None ,__UpperCamelCase : Dict=False ,__UpperCamelCase : Union[str, Any]=None ,__UpperCamelCase : Any=10 ,__UpperCamelCase : int=False ,__UpperCamelCase : Optional[Any]=None ,__UpperCamelCase : str=False ,): """simple docstring""" if cache_dir is None: A_ = TRANSFORMERS_CACHE if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = str(__UpperCamelCase ) os.makedirs(__UpperCamelCase ,exist_ok=__UpperCamelCase ) A_ = None if not local_files_only: try: A_ = requests.head(__UpperCamelCase ,allow_redirects=__UpperCamelCase ,proxies=__UpperCamelCase ,timeout=__UpperCamelCase ) if response.status_code == 200: A_ = response.headers.get("ETag" ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass A_ = url_to_filename(__UpperCamelCase ,__UpperCamelCase ) # get cache path to put the file A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(__UpperCamelCase ): return cache_path else: A_ = [ file for file in fnmatch.filter(os.listdir(__UpperCamelCase ) ,filename + ".*" ) if not file.endswith(".json" ) and not file.endswith(".lock" ) ] if len(__UpperCamelCase ) > 0: return os.path.join(__UpperCamelCase ,matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( "Cannot find the requested files in the cached path and outgoing traffic has been" " disabled. To enable model look-ups and downloads online, set 'local_files_only'" " to False." ) return None # From now on, etag is not None. if os.path.exists(__UpperCamelCase ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. 
A_ = cache_path + ".lock" with FileLock(__UpperCamelCase ): # If the download just completed while the lock was activated. if os.path.exists(__UpperCamelCase ) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: A_ = cache_path + ".incomplete" @contextmanager def _resumable_file_manager(): with open(__UpperCamelCase ,"a+b" ) as f: yield f A_ = _resumable_file_manager if os.path.exists(__UpperCamelCase ): A_ = os.stat(__UpperCamelCase ).st_size else: A_ = 0 else: A_ = partial(tempfile.NamedTemporaryFile ,dir=__UpperCamelCase ,delete=__UpperCamelCase ) A_ = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: print( "%s not found in cache or force_download set to True, downloading to %s" ,__UpperCamelCase ,temp_file.name ,) http_get( __UpperCamelCase ,__UpperCamelCase ,proxies=__UpperCamelCase ,resume_size=__UpperCamelCase ,user_agent=__UpperCamelCase ,) os.replace(temp_file.name ,__UpperCamelCase ) A_ = {"url": url, "etag": etag} A_ = cache_path + ".json" with open(__UpperCamelCase ,"w" ) as meta_file: json.dump(__UpperCamelCase ,__UpperCamelCase ) return cache_path def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : str=None ): """simple docstring""" A_ = url.encode("utf-8" ) A_ = shaaaa(__UpperCamelCase ) A_ = url_hash.hexdigest() if etag: A_ = etag.encode("utf-8" ) A_ = shaaaa(__UpperCamelCase ) filename += "." + etag_hash.hexdigest() if url.endswith(".h5" ): filename += ".h5" return filename def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Union[str, Any]=None ,__UpperCamelCase : List[Any]=False ,__UpperCamelCase : List[str]=None ,__UpperCamelCase : Any=False ,__UpperCamelCase : Optional[int]=None ,__UpperCamelCase : Optional[Any]=False ,__UpperCamelCase : Dict=False ,__UpperCamelCase : Optional[Any]=False ,): """simple docstring""" if cache_dir is None: A_ = TRANSFORMERS_CACHE if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = str(__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = str(__UpperCamelCase ) if is_remote_url(__UpperCamelCase ): # URL, so get it from the cache (downloading if necessary) A_ = get_from_cache( __UpperCamelCase ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,user_agent=__UpperCamelCase ,local_files_only=__UpperCamelCase ,) elif os.path.exists(__UpperCamelCase ): # File, and it exists. A_ = url_or_filename elif urlparse(__UpperCamelCase ).scheme == "": # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(__UpperCamelCase ) ) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(__UpperCamelCase ) ) if extract_compressed_file: if not is_zipfile(__UpperCamelCase ) and not tarfile.is_tarfile(__UpperCamelCase ): return output_path # Path where we extract compressed archives # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" A_ , A_ = os.path.split(__UpperCamelCase ) A_ = output_file.replace("." 
,"-" ) + "-extracted" A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ) if os.path.isdir(__UpperCamelCase ) and os.listdir(__UpperCamelCase ) and not force_extract: return output_path_extracted # Prevent parallel extractions A_ = output_path + ".lock" with FileLock(__UpperCamelCase ): shutil.rmtree(__UpperCamelCase ,ignore_errors=__UpperCamelCase ) os.makedirs(__UpperCamelCase ) if is_zipfile(__UpperCamelCase ): with ZipFile(__UpperCamelCase ,"r" ) as zip_file: zip_file.extractall(__UpperCamelCase ) zip_file.close() elif tarfile.is_tarfile(__UpperCamelCase ): A_ = tarfile.open(__UpperCamelCase ) tar_file.extractall(__UpperCamelCase ) tar_file.close() else: raise EnvironmentError("Archive format of {} could not be identified".format(__UpperCamelCase ) ) return output_path_extracted return output_path def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Any="," ): """simple docstring""" assert isinstance(__UpperCamelCase ,__UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): with open(__UpperCamelCase ) as f: A_ = eval(f.read() ) else: A_ = requests.get(__UpperCamelCase ) try: A_ = requests.json() except Exception: A_ = req.content.decode() assert data is not None, "could not connect" try: A_ = eval(__UpperCamelCase ) except Exception: A_ = data.split("\n" ) req.close() return data def __snake_case ( __UpperCamelCase : int ): """simple docstring""" A_ = requests.get(__UpperCamelCase ) A_ = np.array(Image.open(BytesIO(response.content ) ) ) return img def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" A_ = url.split("/" )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(__UpperCamelCase ) with open(__UpperCamelCase ,"rb" ) as stream: A_ = pkl.load(__UpperCamelCase ) A_ = weights.pop("model" ) A_ = {} for k, v in model.items(): A_ = torch.from_numpy(__UpperCamelCase ) if "running_var" in k: A_ = torch.tensor([0] ) A_ = k.replace("running_var" ,"num_batches_tracked" ) A_ = zero return new def __snake_case ( ): """simple docstring""" print(f'''{os.path.abspath(os.path.join(__UpperCamelCase ,os.pardir ) )}/demo.ipynb''' ) def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[int]="RGB" ): """simple docstring""" assert isinstance(__UpperCamelCase ,__UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): A_ = cva.imread(__UpperCamelCase ) else: A_ = get_image_from_url(__UpperCamelCase ) assert img is not None, f'''could not connect to: {im}''' A_ = cva.cvtColor(__UpperCamelCase ,cva.COLOR_BGR2RGB ) if input_format == "RGB": A_ = img[:, :, ::-1] return img def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[str]=1 ): """simple docstring""" return (images[i : i + batch] for i in range(0 ,len(__UpperCamelCase ) ,__UpperCamelCase ))
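# Editorial restatement of the cache-key scheme in url_to_filename above,
# using the standard hashlib spelling: the cache entry name is sha256(url),
# with sha256(etag) appended when an ETag is known, so a changed remote file
# maps to a fresh cache entry.
from hashlib import sha256


def cache_filename(url, etag=None):
    filename = sha256(url.encode("utf-8")).hexdigest()
    if etag:
        filename += "." + sha256(etag.encode("utf-8")).hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename


print(cache_filename("https://example.com/model.bin", etag='"abc123"')[:24], "...")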
from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class _a : """simple docstring""" _lowerCamelCase : Tuple = LEDConfig _lowerCamelCase : Optional[int] = {} _lowerCamelCase : int = 'gelu' def __init__( self : Dict , UpperCAmelCase : str , UpperCAmelCase : Any=13 , UpperCAmelCase : List[str]=7 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : Any=False , UpperCAmelCase : Tuple=99 , UpperCAmelCase : Tuple=32 , UpperCAmelCase : Dict=2 , UpperCAmelCase : Any=4 , UpperCAmelCase : List[str]=37 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : List[str]=20 , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : Optional[Any]=1 , UpperCAmelCase : Optional[Any]=0 , UpperCAmelCase : Tuple=4 , ): A_ = parent A_ = batch_size A_ = seq_length A_ = is_training A_ = use_labels A_ = vocab_size A_ = hidden_size A_ = num_hidden_layers A_ = num_attention_heads A_ = intermediate_size A_ = hidden_dropout_prob A_ = attention_probs_dropout_prob A_ = max_position_embeddings A_ = eos_token_id A_ = pad_token_id A_ = bos_token_id A_ = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after A_ = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests A_ = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def __A ( self : Tuple ): A_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A_ = tf.concat([input_ids, eos_tensor] , axis=1 ) A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) A_ = prepare_led_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) A_ = tf.concat( [tf.zeros_like(UpperCAmelCase )[:, :-1], tf.ones_like(UpperCAmelCase )[:, -1:]] , axis=-1 , ) A_ = global_attention_mask return config, inputs_dict def __A ( self : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int ): A_ = TFLEDModel(config=UpperCAmelCase ).get_decoder() A_ = inputs_dict["input_ids"] A_ = input_ids[:1, :] A_ = inputs_dict["attention_mask"][:1, :] A_ = 1 # first forward pass A_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , use_cache=UpperCAmelCase ) A_ , A_ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) A_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A_ = tf.concat([input_ids, next_tokens] , axis=-1 ) A_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase )[0] A_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A_ = output_from_no_past[:, -3:, random_slice_idx] A_ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , rtol=1E-3 ) def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Dict ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[Any]=None ,__UpperCamelCase : List[Any]=None ,__UpperCamelCase : Optional[Any]=None ,__UpperCamelCase : int=None ,): """simple docstring""" if attention_mask is None: A_ = tf.cast(tf.math.not_equal(__UpperCamelCase ,config.pad_token_id ) ,tf.inta ) if decoder_attention_mask is None: A_ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ), ] ,axis=-1 ,) if head_mask is None: A_ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, 
"decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class _a ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : Tuple = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () _lowerCamelCase : Union[str, Any] = (TFLEDForConditionalGeneration,) if is_tf_available() else () _lowerCamelCase : int = ( { 'conversational': TFLEDForConditionalGeneration, 'feature-extraction': TFLEDModel, 'summarization': TFLEDForConditionalGeneration, 'text2text-generation': TFLEDForConditionalGeneration, 'translation': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) _lowerCamelCase : List[Any] = True _lowerCamelCase : str = False _lowerCamelCase : Optional[Any] = False _lowerCamelCase : Union[str, Any] = False def __A ( self : List[Any] ): A_ = TFLEDModelTester(self ) A_ = ConfigTester(self , config_class=UpperCAmelCase ) def __A ( self : int ): self.config_tester.run_common_tests() def __A ( self : Tuple ): A_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase ) def __A ( self : Dict ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() A_ = tf.zeros_like(inputs_dict["attention_mask"] ) A_ = 2 A_ = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , ) A_ = True A_ = self.model_tester.seq_length A_ = self.model_tester.encoder_seq_length def check_decoder_attentions_output(UpperCAmelCase : Optional[int] ): A_ = outputs.decoder_attentions self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(UpperCAmelCase : str ): A_ = [t.numpy() for t in outputs.encoder_attentions] A_ = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: A_ = True A_ = False A_ = False A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) A_ = len(UpperCAmelCase ) self.assertEqual(config.output_hidden_states , UpperCAmelCase ) check_encoder_attentions_output(UpperCAmelCase ) if self.is_encoder_decoder: A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , UpperCAmelCase ) check_decoder_attentions_output(UpperCAmelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] A_ = True A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , UpperCAmelCase ) check_encoder_attentions_output(UpperCAmelCase ) # Check attention is always last and order is fine A_ = True A_ = True A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) 
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) ) self.assertEqual(model.config.output_hidden_states , UpperCAmelCase ) check_encoder_attentions_output(UpperCAmelCase ) @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." ) def __A ( self : List[Any] ): pass def __A ( self : Optional[int] ): # TODO: Head-masking not yet implement pass def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" return tf.constant(__UpperCamelCase ,dtype=tf.intaa ) __a :Any = 1e-4 @slow @require_tf class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : Optional[Any] ): A_ = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led # change to intended input here A_ = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) A_ = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) A_ = prepare_led_inputs_dict(model.config , UpperCAmelCase , UpperCAmelCase ) A_ = model(**UpperCAmelCase )[0] A_ = (1, 1024, 768) self.assertEqual(output.shape , UpperCAmelCase ) # change to expected output here A_ = tf.convert_to_tensor( [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , ) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-3 ) def __A ( self : Optional[int] ): A_ = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ) # change to intended input here A_ = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) A_ = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) A_ = prepare_led_inputs_dict(model.config , UpperCAmelCase , UpperCAmelCase ) A_ = model(**UpperCAmelCase )[0] A_ = (1, 1024, model.config.vocab_size) self.assertEqual(output.shape , UpperCAmelCase ) # change to expected output here A_ = tf.convert_to_tensor( [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , ) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-3 , rtol=1E-3 )
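# Hedged sketch of the default-mask rule in prepare_led_inputs_dict above:
# positions equal to the pad id are masked out. TensorFlow and the pad id
# value (1, LED's default) are assumptions of this demo.
import tensorflow as tf

pad_token_id = 1
demo_input_ids = tf.constant([[0, 5, 7, pad_token_id, pad_token_id]])
demo_mask = tf.cast(tf.math.not_equal(demo_input_ids, pad_token_id), tf.int8)
# demo_mask -> [[1, 1, 1, 0, 0]]: real tokens attend, padding is ignored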
312
from __future__ import annotations


def min_path_sum(matrix: list[list[int]]) -> int:
    """Minimal top-left to bottom-right path sum; mutates `matrix` in place."""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
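# A minimal usage sketch for min_path_sum above (the name was chosen in this
# rewrite; the 3x3 grid is an illustrative input). Note the function mutates
# its argument, so pass a copy if the original grid is still needed.
grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
assert min_path_sum([row[:] for row in grid]) == 7  # path 1 -> 3 -> 1 -> 1 -> 1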
312
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer __a :List[str] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __a :Union[str, Any] = { 'vocab_file': { 'google/electra-small-generator': ( 'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt' ), 'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt', 'google/electra-large-generator': ( 'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt' ), 'google/electra-small-discriminator': ( 'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt' ), 'google/electra-base-discriminator': ( 'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt' ), 'google/electra-large-discriminator': ( 'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'google/electra-small-generator': ( 'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json' ), 'google/electra-base-generator': ( 'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json' ), 'google/electra-large-generator': ( 'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json' ), 'google/electra-small-discriminator': ( 'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json' ), 'google/electra-base-discriminator': ( 'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json' ), 'google/electra-large-discriminator': ( 'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json' ), }, } __a :Optional[int] = { 'google/electra-small-generator': 512, 'google/electra-base-generator': 512, 'google/electra-large-generator': 512, 'google/electra-small-discriminator': 512, 'google/electra-base-discriminator': 512, 'google/electra-large-discriminator': 512, } __a :str = { 'google/electra-small-generator': {'do_lower_case': True}, 'google/electra-base-generator': {'do_lower_case': True}, 'google/electra-large-generator': {'do_lower_case': True}, 'google/electra-small-discriminator': {'do_lower_case': True}, 'google/electra-base-discriminator': {'do_lower_case': True}, 'google/electra-large-discriminator': {'do_lower_case': True}, } class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Tuple = VOCAB_FILES_NAMES _lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION _lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase : int = ElectraTokenizer def __init__( self : Tuple , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Any=True , UpperCAmelCase : Any="[UNK]" , UpperCAmelCase : Union[str, Any]="[SEP]" , UpperCAmelCase : List[Any]="[PAD]" , UpperCAmelCase : Union[str, Any]="[CLS]" , UpperCAmelCase : List[Any]="[MASK]" , UpperCAmelCase : List[str]=True , UpperCAmelCase : Any=None , **UpperCAmelCase : Union[str, Any] , ): super().__init__( UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , ) A_ = 
json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars ): A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) ) A_ = do_lower_case A_ = strip_accents A_ = tokenize_chinese_chars A_ = normalizer_class(**UpperCAmelCase ) A_ = do_lower_case def __A ( self : int , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any]=None ): A_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __A ( self : Union[str, Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ): A_ = [self.sep_token_id] A_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ): A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase ) return tuple(UpperCAmelCase )
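# Hedged usage sketch of the intended API (the class above is obfuscated as
# `_a`; upstream it is ElectraTokenizerFast). Running this needs transformers
# plus network access or a local cache of the checkpoint.
if __name__ == "__main__":
    from transformers import ElectraTokenizerFast

    tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
    print(tok("hello world")["input_ids"])  # ids wrapped as [CLS] ... [SEP]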
312
from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging __a :int = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class _a ( snake_case_ ): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : int = 101 ): A_ = length def __len__( self : int ): return self.length def __getitem__( self : Optional[int] , UpperCAmelCase : Optional[int] ): return i class _a : """simple docstring""" def __call__( self : Any , UpperCAmelCase : Optional[Any] ): return {"input_ids": torch.tensor(UpperCAmelCase ), "labels": torch.tensor(UpperCAmelCase )} class _a ( nn.Module ): """simple docstring""" def __init__( self : int ): super().__init__() # Add some (unused) params otherwise DDP will complain. A_ = nn.Linear(120 , 80 ) def __A ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Tuple=None ): if labels is not None: return torch.tensor(0.0 , device=input_ids.device ), input_ids else: return input_ids class _a ( snake_case_ ): """simple docstring""" @require_torch_neuroncore def __A ( self : List[str] ): A_ = f'''--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py '''.split() A_ = self.get_auto_remove_tmp_dir() A_ = f'''--output_dir {output_dir}'''.split() A_ = ["torchrun"] + distributed_args + args execute_subprocess_async(UpperCAmelCase , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call class _a ( snake_case_ ): """simple docstring""" @require_torch_multi_gpu def __A ( self : List[str] ): A_ = f'''--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py '''.split() A_ = self.get_auto_remove_tmp_dir() A_ = f'''--output_dir {output_dir}'''.split() A_ = ["torchrun"] + distributed_args + args execute_subprocess_async(UpperCAmelCase , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py __a :Union[str, Any] = HfArgumentParser((TrainingArguments,)) __a :Tuple = parser.parse_args_into_dataclasses()[0] logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " F"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}" ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. 
(this is crucial for prediction for instance) for dataset_length in [101, 40, 7]: __a :int = DummyDataset(dataset_length) def __snake_case ( __UpperCamelCase : EvalPrediction ): """simple docstring""" A_ = list(range(len(__UpperCamelCase ) ) ) A_ = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( "Predictions and/or labels do not match expected results:\n - predictions: " f'''{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}''' ) return {"success": success} __a :str = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) __a :str = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) __a :str = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) __a :Optional[int] = 2 __a :List[Any] = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) __a :str = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) __a :Union[str, Any] = None
312
1
import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : List[Any] ): A_ = inspect.getfile(accelerate.test_utils ) A_ = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps", "test_metrics.py"] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 A_ = test_metrics @require_cpu def __A ( self : Any ): debug_launcher(self.test_metrics.main , num_processes=1 ) @require_cpu def __A ( self : Tuple ): debug_launcher(self.test_metrics.main ) @require_single_gpu def __A ( self : str ): self.test_metrics.main() @require_multi_gpu def __A ( self : Union[str, Any] ): print(f'''Found {torch.cuda.device_count()} devices.''' ) A_ = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCAmelCase , env=os.environ.copy() )
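# Hedged sketch of accelerate's patch_environment used above: it temporarily
# sets environment variables (keys are upper-cased) and restores the previous
# state when the block exits.
import os

from accelerate.utils import patch_environment

with patch_environment(omp_num_threads=1):
    assert os.environ["OMP_NUM_THREADS"] == "1"
# outside the block, OMP_NUM_THREADS is back to its previous (possibly unset) value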
312
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __a :Any = logging.get_logger(__name__) def __snake_case ( __UpperCamelCase : Optional[int] ): """simple docstring""" A_ = "huggingface/label-files" A_ = "imagenet-1k-id2label.json" A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) ) A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} A_ = {v: k for k, v in idalabel.items()} A_ = "std_conv" if "bit" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" A_ = BitConfig( conv_layer=__UpperCamelCase ,num_labels=1000 ,idalabel=__UpperCamelCase ,labelaid=__UpperCamelCase ,) return config def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" if "stem.conv" in name: A_ = name.replace("stem.conv" ,"bit.embedder.convolution" ) if "blocks" in name: A_ = name.replace("blocks" ,"layers" ) if "head.fc" in name: A_ = name.replace("head.fc" ,"classifier.1" ) if name.startswith("norm" ): A_ = "bit." + name if "bit" not in name and "classifier" not in name: A_ = "bit.encoder." + name return name def __snake_case ( ): """simple docstring""" A_ = "http://images.cocodataset.org/val2017/000000039769.jpg" A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Tuple=False ): """simple docstring""" A_ = get_config(__UpperCamelCase ) # load original model from timm A_ = create_model(__UpperCamelCase ,pretrained=__UpperCamelCase ) timm_model.eval() # load state_dict of original model A_ = timm_model.state_dict() for key in state_dict.copy().keys(): A_ = state_dict.pop(__UpperCamelCase ) A_ = val.squeeze() if "head" in key else val # load HuggingFace model A_ = BitForImageClassification(__UpperCamelCase ) model.eval() model.load_state_dict(__UpperCamelCase ) # create image processor A_ = create_transform(**resolve_data_config({} ,model=__UpperCamelCase ) ) A_ = transform.transforms A_ = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } A_ = BitImageProcessor( do_resize=__UpperCamelCase ,size={"shortest_edge": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=__UpperCamelCase ,crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} ,do_normalize=__UpperCamelCase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,) A_ = prepare_img() A_ = transform(__UpperCamelCase ).unsqueeze(0 ) A_ = processor(__UpperCamelCase ,return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(__UpperCamelCase ,__UpperCamelCase ) # verify logits with torch.no_grad(): A_ = model(__UpperCamelCase ) A_ = outputs.logits print("Logits:" ,logits[0, :3] ) print("Predicted class:" 
,model.config.idalabel[logits.argmax(-1 ).item()] ) A_ = timm_model(__UpperCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__UpperCamelCase ,outputs.logits ,atol=1E-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(__UpperCamelCase ) processor.save_pretrained(__UpperCamelCase ) if push_to_hub: print(f'''Pushing model {model_name} and processor to the hub''' ) model.push_to_hub(f'''ybelkada/{model_name}''' ) processor.push_to_hub(f'''ybelkada/{model_name}''' ) if __name__ == "__main__": __a :List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='resnetv2_50x1_bitm', type=str, help='Name of the BiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model to the hub.', ) __a :str = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
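# Illustrative invocation sketch for the conversion entry point above (the
# script filename is an assumption; the flags mirror the argparse definition):
#
#   python convert_bit_to_pytorch.py \
#       --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-resnetv2-50x1 \
#       --push_to_hub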
312
1
import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient __a :Tuple = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN']) def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" A_ = test_results.split(" " ) A_ = 0 A_ = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. A_ = expressions[-2] if "=" in expressions[-1] else expressions[-1] for i, expression in enumerate(__UpperCamelCase ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def __snake_case ( __UpperCamelCase : List[str] ): """simple docstring""" A_ = {} A_ = None A_ = False for line in failures_short_lines.split("\n" ): if re.search(R"_ \[doctest\]" ,__UpperCamelCase ): A_ = True A_ = line.split(" " )[2] elif in_error and not line.split(" " )[0].isdigit(): A_ = line A_ = False return failures class _a : """simple docstring""" def __init__( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Dict ): A_ = title A_ = doc_test_results["time_spent"].split("," )[0] A_ = doc_test_results["success"] A_ = doc_test_results["failures"] A_ = self.n_success + self.n_failures # Failures and success of the modeling tests A_ = doc_test_results @property def __A ( self : List[str] ): A_ = [self._time_spent] A_ = 0 for time in time_spent: A_ = time.split(":" ) # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(UpperCAmelCase ) == 1: A_ = [0, 0, time_parts[0]] A_ , A_ , A_ = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] ) total_secs += hours * 3600 + minutes * 60 + seconds A_ , A_ , A_ = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60 return f'''{int(UpperCAmelCase )}h{int(UpperCAmelCase )}m{int(UpperCAmelCase )}s''' @property def __A ( self : Optional[Any] ): return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def __A ( self : Dict ): return { "type": "section", "text": { "type": "plain_text", "text": f'''🌞 There were no failures: all {self.n_tests} tests passed. 
The suite ran in {self.time}.''', "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''', }, } @property def __A ( self : str ): return { "type": "section", "text": { "type": "plain_text", "text": ( f'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in''' f''' {self.time}.''' ), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''', }, } @property def __A ( self : Tuple ): A_ = 40 A_ = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(UpperCAmelCase , UpperCAmelCase )} A_ = "" for category, failures in category_failures.items(): if len(UpperCAmelCase ) == 0: continue if report != "": report += "\n\n" report += f'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n" report += "`" report += "`\n`".join(UpperCAmelCase ) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": f'''The following examples had failures:\n\n\n{report}\n''', }, } @property def __A ( self : List[Any] ): A_ = [self.header] if self.n_failures > 0: blocks.append(self.failures ) if self.n_failures > 0: blocks.extend([self.category_failures] ) if self.n_failures == 0: blocks.append(self.no_failures ) return json.dumps(UpperCAmelCase ) @staticmethod def __A ( ): A_ = [ { "type": "section", "text": { "type": "plain_text", "text": "There was an issue running the tests.", }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''', }, } ] print("Sending the following payload" ) print(json.dumps({"blocks": json.loads(UpperCAmelCase )} ) ) client.chat_postMessage( channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=UpperCAmelCase , ) def __A ( self : Optional[int] ): print("Sending the following payload" ) print(json.dumps({"blocks": json.loads(self.payload )} ) ) A_ = f'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else "All tests passed." A_ = client.chat_postMessage( channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=UpperCAmelCase , ) def __A ( self : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] ): A_ = "" for key, value in failures.items(): A_ = value[:200] + " [Truncated]" if len(UpperCAmelCase ) > 250 else value failures_text += f'''*{key}*\n_{value}_\n\n''' A_ = job_name A_ = {"type": "section", "text": {"type": "mrkdwn", "text": text}} if job_link is not None: A_ = { "type": "button", "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True}, "url": job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def __A ( self : Union[str, Any] ): if self.thread_ts is None: raise ValueError("Can only post reply if a post has been made." 
) A_ = self.doc_test_results.pop("job_link" ) self.doc_test_results.pop("failures" ) self.doc_test_results.pop("success" ) self.doc_test_results.pop("time_spent" ) A_ = sorted(self.doc_test_results.items() , key=lambda UpperCAmelCase : t[0] ) for job, job_result in sorted_dict: if len(job_result["failures"] ): A_ = f'''*Num failures* :{len(job_result["failed"] )} \n''' A_ = job_result["failures"] A_ = self.get_reply_blocks(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , text=UpperCAmelCase ) print("Sending the following reply" ) print(json.dumps({"blocks": blocks} ) ) client.chat_postMessage( channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=f'''Results for {job}''' , blocks=UpperCAmelCase , thread_ts=self.thread_ts["ts"] , ) time.sleep(1 ) def __snake_case ( ): """simple docstring""" A_ = os.environ["GITHUB_RUN_ID"] A_ = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100''' A_ = requests.get(__UpperCamelCase ).json() A_ = {} try: jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} ) A_ = math.ceil((result["total_count"] - 100) / 100 ) for i in range(__UpperCamelCase ): A_ = requests.get(url + f'''&page={i + 2}''' ).json() jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} ) return jobs except Exception as e: print("Unknown error, could not fetch links." ,__UpperCamelCase ) return {} def __snake_case ( __UpperCamelCase : str ): """simple docstring""" A_ = {} if os.path.exists(__UpperCamelCase ): A_ = os.listdir(__UpperCamelCase ) for file in files: try: with open(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,encoding="utf-8" ) as f: A_ = f.read() except UnicodeDecodeError as e: raise ValueError(f'''Could not open {os.path.join(__UpperCamelCase ,__UpperCamelCase )}.''' ) from e return _artifact def __snake_case ( ): """simple docstring""" class _a : """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase : str ): A_ = name A_ = [] def __str__( self : List[str] ): return self.name def __A ( self : int , UpperCAmelCase : str ): self.paths.append({"name": self.name, "path": path} ) A_ = {} A_ = filter(os.path.isdir ,os.listdir() ) for directory in directories: A_ = directory if artifact_name not in _available_artifacts: A_ = Artifact(__UpperCamelCase ) _available_artifacts[artifact_name].add_path(__UpperCamelCase ) return _available_artifacts if __name__ == "__main__": __a :Union[str, Any] = get_job_links() __a :Union[str, Any] = retrieve_available_artifacts() __a :List[str] = collections.OrderedDict( [ ('*.py', 'API Examples'), ('*.md', 'MD Examples'), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' __a :int = { v: { 'failed': [], 'failures': {}, } for v in docs.values() } # Link to the GitHub Action job __a :Tuple = github_actions_job_links.get('run_doctests') __a :Any = available_artifacts['doc_tests_gpu_test_reports'].paths[0] __a :str = retrieve_artifact(artifact_path['name']) if "stats" in artifact: __a , __a , __a :Tuple = handle_test_results(artifact['stats']) __a :Union[str, Any] = failed __a :List[str] = success __a :List[Any] = time_spent[1:-1] + ', ' __a :int = extract_first_line_failure(artifact['failures_short']) for line in artifact["summary_short"].split('\n'): if re.search('FAILED', line): __a :Dict = line.replace('FAILED ', '') __a :str = line.split()[0].replace('\n', '') if "::" in line: __a , __a :Any = line.split('::') else: __a , __a 
:Dict = line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): __a :List[Any] = docs[file_regex] doc_test_results[category]["failed"].append(test) __a :str = all_failures[test] if test in all_failures else 'N/A' __a :Optional[int] = failure break __a :Tuple = Message('🤗 Results of the doc tests.', doc_test_results) message.post() message.post_reply()
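# Hedged sketch of the time normalization in the `time` property above:
# "h:mm:ss" chunks are reduced to total seconds before being reformatted.
time_parts = "0:01:30".split(":")
hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
total_secs = hours * 3600 + minutes * 60 + seconds
assert total_secs == 90.0  # 1m30s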
312
import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger __a :Dict = get_logger(__name__) __a :Union[str, Any] = Path(__file__).parent / 'model_card_template.md' __a :Tuple = uuida().hex __a :List[Any] = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES __a :Union[str, Any] = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES __a :Tuple = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/' def __snake_case ( __UpperCamelCase : Union[Dict, str, None] = None ): """simple docstring""" A_ = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}''' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'''; torch/{_torch_version}''' if is_flax_available(): ua += f'''; jax/{_jax_version}''' ua += f'''; flax/{_flax_version}''' if is_onnx_available(): ua += f'''; onnxruntime/{_onnxruntime_version}''' # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" ,"" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(__UpperCamelCase ,__UpperCamelCase ): ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items() ) elif isinstance(__UpperCamelCase ,__UpperCamelCase ): ua += "; " + user_agent return ua def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[str] = None ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if token is None: A_ = HfFolder.get_token() if organization is None: A_ = whoami(__UpperCamelCase )["name"] return f'''{username}/{model_id}''' else: return f'''{organization}/{model_id}''' def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Union[str, Any] ): """simple docstring""" if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." 
) if hasattr(__UpperCamelCase ,"local_rank" ) and args.local_rank not in [-1, 0]: return A_ = args.hub_token if hasattr(__UpperCamelCase ,"hub_token" ) else None A_ = get_full_repo_name(__UpperCamelCase ,token=__UpperCamelCase ) A_ = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" ,license="apache-2.0" ,library_name="diffusers" ,tags=[] ,datasets=args.dataset_name ,metrics=[] ,) ,template_path=__UpperCamelCase ,model_name=__UpperCamelCase ,repo_name=__UpperCamelCase ,dataset_name=args.dataset_name if hasattr(__UpperCamelCase ,"dataset_name" ) else None ,learning_rate=args.learning_rate ,train_batch_size=args.train_batch_size ,eval_batch_size=args.eval_batch_size ,gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(__UpperCamelCase ,"gradient_accumulation_steps" ) else None ) ,adam_betaa=args.adam_betaa if hasattr(__UpperCamelCase ,"adam_beta1" ) else None ,adam_betaa=args.adam_betaa if hasattr(__UpperCamelCase ,"adam_beta2" ) else None ,adam_weight_decay=args.adam_weight_decay if hasattr(__UpperCamelCase ,"adam_weight_decay" ) else None ,adam_epsilon=args.adam_epsilon if hasattr(__UpperCamelCase ,"adam_epsilon" ) else None ,lr_scheduler=args.lr_scheduler if hasattr(__UpperCamelCase ,"lr_scheduler" ) else None ,lr_warmup_steps=args.lr_warmup_steps if hasattr(__UpperCamelCase ,"lr_warmup_steps" ) else None ,ema_inv_gamma=args.ema_inv_gamma if hasattr(__UpperCamelCase ,"ema_inv_gamma" ) else None ,ema_power=args.ema_power if hasattr(__UpperCamelCase ,"ema_power" ) else None ,ema_max_decay=args.ema_max_decay if hasattr(__UpperCamelCase ,"ema_max_decay" ) else None ,mixed_precision=args.mixed_precision ,) A_ = os.path.join(args.output_dir ,"README.md" ) model_card.save(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Optional[str] ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if resolved_file is None or commit_hash is not None: return commit_hash A_ = str(Path(__UpperCamelCase ).as_posix() ) A_ = re.search(R"snapshots/([^/]+)/" ,__UpperCamelCase ) if search is None: return None A_ = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(__UpperCamelCase ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. __a :str = os.path.expanduser( os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface')) ) __a :List[Any] = os.path.join(hf_cache_home, 'diffusers') def __snake_case ( __UpperCamelCase : Optional[str] = None ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if new_cache_dir is None: A_ = DIFFUSERS_CACHE if old_cache_dir is None: A_ = old_diffusers_cache A_ = Path(__UpperCamelCase ).expanduser() A_ = Path(__UpperCamelCase ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): A_ = new_cache_dir / old_blob_path.relative_to(__UpperCamelCase ) new_blob_path.parent.mkdir(parents=__UpperCamelCase ,exist_ok=__UpperCamelCase ) os.replace(__UpperCamelCase ,__UpperCamelCase ) try: os.symlink(__UpperCamelCase ,__UpperCamelCase ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. 
If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). __a :Dict = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt') if not os.path.isfile(cache_version_file): __a :Optional[int] = 0 else: with open(cache_version_file) as f: try: __a :Dict = int(f.read()) except ValueError: __a :str = 0 if cache_version < 1: __a :Optional[Any] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( 'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your ' 'existing cached models. This is a one-time operation, you can interrupt it or run it ' 'later by calling `diffusers.utils.hub_utils.move_cache()`.' ) try: move_cache() except Exception as e: __a :Optional[Any] = '\n'.join(traceback.format_tb(e.__traceback__)) logger.error( F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease " 'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole ' 'message and we will do our best to help.' ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, 'w') as f: f.write('1') except Exception: logger.warning( F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure " 'the directory exists and can be written to.' ) def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if variant is not None: A_ = weights_name.split("." ) A_ = splits[:-1] + [variant] + splits[-1:] A_ = ".".join(__UpperCamelCase ) return weights_name def __snake_case ( __UpperCamelCase : Optional[Any] ,*, __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Any ,__UpperCamelCase : Tuple ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : str ,__UpperCamelCase : int ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : int ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Optional[int]=None ,): """simple docstring""" A_ = str(__UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): return pretrained_model_name_or_path elif os.path.isdir(__UpperCamelCase ): if os.path.isfile(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ): # Load from a PyTorch checkpoint A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ): A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) return model_file else: raise EnvironmentError( f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' ) else: # 1. 
First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(__UpperCamelCase ).base_version ) >= version.parse("0.20.0" ) ): try: A_ = hf_hub_download( __UpperCamelCase ,filename=_add_variant(__UpperCamelCase ,__UpperCamelCase ) ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,local_files_only=__UpperCamelCase ,use_auth_token=__UpperCamelCase ,user_agent=__UpperCamelCase ,subfolder=__UpperCamelCase ,revision=revision or commit_hash ,) warnings.warn( f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' ,__UpperCamelCase ,) return model_file except: # noqa: E722 warnings.warn( f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__UpperCamelCase ,__UpperCamelCase )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(__UpperCamelCase ,__UpperCamelCase )}\' so that the correct variant file can be added.''' ,__UpperCamelCase ,) try: # 2. Load model file as usual A_ = hf_hub_download( __UpperCamelCase ,filename=__UpperCamelCase ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,local_files_only=__UpperCamelCase ,use_auth_token=__UpperCamelCase ,user_agent=__UpperCamelCase ,subfolder=__UpperCamelCase ,revision=revision or commit_hash ,) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ''' "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ''' "this model name. 
Check the model page at " f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' ) except EntryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' ) except HTTPError as err: raise EnvironmentError( f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' ) except ValueError: raise EnvironmentError( f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it''' f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a''' f''' directory containing a file named {weights_name} or''' " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ''' "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ''' f'''containing a file named {weights_name}''' )
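# Hedged restatement of the variant-filename rule implemented above (the helper
# names are obfuscated in this dump, so the logic is repeated under a local
# name for a standalone, runnable demo):
from typing import Optional


def add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name


assert add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"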
312
1
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave two strings, appending the leftover tail of the longer one."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list[str] = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
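# Usage note: for the demo call above the result is "AXBYZ" — characters are
# taken alternately from each string and the unmatched tail "Z" comes last.
assert alternative_string_arrange("AB", "XYZ") == "AXBYZ"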
312
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __a :Any = { 'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'], 'processing_mgp_str': ['MgpstrProcessor'], 'tokenization_mgp_str': ['MgpstrTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Optional[Any] = [ 'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST', 'MgpstrModel', 'MgpstrPreTrainedModel', 'MgpstrForSceneTextRecognition', ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys __a :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
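# Hedged sketch of the lazy-import pattern above: with _LazyModule, the heavy
# submodule imports are deferred until an attribute is first accessed
# (assumes a transformers install that ships the mgp_str model).
import importlib

mgp = importlib.import_module("transformers.models.mgp_str")  # cheap at import time
config_cls = mgp.MgpstrConfig  # first attribute access triggers the real import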
312
1
import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __a :Any = logging.get_logger(__name__) __a :List[Any] = {'vocab_file': 'spiece.model'} __a :Optional[int] = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), } } __a :Optional[int] = { 'google/bigbird-roberta-base': 4096, 'google/bigbird-roberta-large': 4096, 'google/bigbird-base-trivia-itc': 4096, } class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES _lowerCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase : Dict = ['input_ids', 'attention_mask'] _lowerCamelCase : List[int] = [] def __init__( self : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any]="<unk>" , UpperCAmelCase : int="<s>" , UpperCAmelCase : Optional[Any]="</s>" , UpperCAmelCase : int="<pad>" , UpperCAmelCase : Optional[int]="[SEP]" , UpperCAmelCase : List[Any]="[MASK]" , UpperCAmelCase : int="[CLS]" , UpperCAmelCase : Optional[Dict[str, Any]] = None , **UpperCAmelCase : Dict , ): A_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else bos_token A_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else eos_token A_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else unk_token A_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else pad_token A_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else cls_token A_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it A_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token A_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , sep_token=UpperCAmelCase , mask_token=UpperCAmelCase , cls_token=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , ) A_ = vocab_file A_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase ) @property def __A ( self : Optional[int] ): return self.sp_model.get_piece_size() def __A ( self : int ): A_ = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ): A_ = self.__dict__.copy() A_ = None return state def __setstate__( self : Tuple , UpperCAmelCase : List[str] ): A_ = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): A_ = {} A_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __A ( self : Tuple , UpperCAmelCase : str ): return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase ) def __A ( self : int , UpperCAmelCase : Optional[Any] ): return self.sp_model.piece_to_id(UpperCAmelCase ) def __A ( self : List[str] , UpperCAmelCase : int ): A_ = self.sp_model.IdToPiece(UpperCAmelCase ) return token def __A ( self : Tuple , UpperCAmelCase : Optional[int] ): A_ = [] A_ = "" A_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCAmelCase ) + token A_ = True A_ = [] else: current_sub_tokens.append(UpperCAmelCase ) A_ = False out_string += self.sp_model.decode(UpperCAmelCase ) return out_string.strip() def __A ( self : Tuple , UpperCAmelCase : List[int] , UpperCAmelCase : bool = False , UpperCAmelCase : bool = None , UpperCAmelCase : bool = True , **UpperCAmelCase : Optional[int] , ): A_ = kwargs.pop("use_source_tokenizer" , UpperCAmelCase ) A_ = self.convert_ids_to_tokens(UpperCAmelCase , skip_special_tokens=UpperCAmelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 A_ = [] A_ = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(UpperCAmelCase ) ) A_ = [] sub_texts.append(UpperCAmelCase ) else: current_sub_text.append(UpperCAmelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(UpperCAmelCase ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: A_ = re.sub(R" (\[(MASK|SEP)\])" , R"\1" , " ".join(UpperCAmelCase ) ) else: A_ = "".join(UpperCAmelCase ) A_ = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: A_ = self.clean_up_tokenization(UpperCAmelCase ) return clean_text else: return text def __A ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ): if not os.path.isdir(UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return A_ = os.path.join( UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase , "wb" ) as fi: A_ = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase ) return (out_vocab_file,) def __A ( self : Union[str, Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A_ = [self.cls_token_id] A_ = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def __A ( self : Dict , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase )) + [1] return [1] + ([0] * len(UpperCAmelCase )) + [1] + ([0] * len(UpperCAmelCase )) + [1] def __A ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ): A_ = [self.sep_token_id] A_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
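# Hedged usage sketch of the intended API (the class above is obfuscated as
# `_a`; upstream it is BigBirdTokenizer). Needs transformers, sentencepiece,
# and network access or a local cache.
if __name__ == "__main__":
    from transformers import BigBirdTokenizer

    tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    ids = tok("The quick brown fox.").input_ids
    print(tok.decode(ids))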
312
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:
    """Return True if `string` can be segmented into a sequence of `words`."""
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
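# Usage sketch for word_break above: segmentation succeeds only if the whole
# string can be covered by dictionary words.
assert word_break("applepenapple", ["apple", "pen"]) is True
assert word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False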
312
1
# Number of characters in the alphabet, used as the hash base
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using Rabin-Karp hashing."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
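# Hedged illustration of the O(1) rolling-hash update used above, on a
# two-character window: drop the leading char, shift, append the next char.
# For a length-2 pattern, modulus_power equals the base.
base, mod = 256, 1_000_003
h = (ord("a") * base + ord("b")) % mod                # hash of "ab"
h = ((h - ord("a") * base) * base + ord("c")) % mod   # now hash of "bc"
assert h == (ord("b") * base + ord("c")) % mod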
312
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer __a :List[str] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __a :Union[str, Any] = { 'vocab_file': { 'google/electra-small-generator': ( 'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt' ), 'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt', 'google/electra-large-generator': ( 'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt' ), 'google/electra-small-discriminator': ( 'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt' ), 'google/electra-base-discriminator': ( 'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt' ), 'google/electra-large-discriminator': ( 'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'google/electra-small-generator': ( 'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json' ), 'google/electra-base-generator': ( 'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json' ), 'google/electra-large-generator': ( 'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json' ), 'google/electra-small-discriminator': ( 'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json' ), 'google/electra-base-discriminator': ( 'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json' ), 'google/electra-large-discriminator': ( 'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json' ), }, } __a :Optional[int] = { 'google/electra-small-generator': 512, 'google/electra-base-generator': 512, 'google/electra-large-generator': 512, 'google/electra-small-discriminator': 512, 'google/electra-base-discriminator': 512, 'google/electra-large-discriminator': 512, } __a :str = { 'google/electra-small-generator': {'do_lower_case': True}, 'google/electra-base-generator': {'do_lower_case': True}, 'google/electra-large-generator': {'do_lower_case': True}, 'google/electra-small-discriminator': {'do_lower_case': True}, 'google/electra-base-discriminator': {'do_lower_case': True}, 'google/electra-large-discriminator': {'do_lower_case': True}, } class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Tuple = VOCAB_FILES_NAMES _lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION _lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase : int = ElectraTokenizer def __init__( self : Tuple , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Any=True , UpperCAmelCase : Any="[UNK]" , UpperCAmelCase : Union[str, Any]="[SEP]" , UpperCAmelCase : List[Any]="[PAD]" , UpperCAmelCase : Union[str, Any]="[CLS]" , UpperCAmelCase : List[Any]="[MASK]" , UpperCAmelCase : List[str]=True , UpperCAmelCase : Any=None , **UpperCAmelCase : Union[str, Any] , ): super().__init__( UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , ) A_ = 
json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars ): A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) ) A_ = do_lower_case A_ = strip_accents A_ = tokenize_chinese_chars A_ = normalizer_class(**UpperCAmelCase ) A_ = do_lower_case def __A ( self : int , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any]=None ): A_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __A ( self : Union[str, Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ): A_ = [self.sep_token_id] A_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ): A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase ) return tuple(UpperCAmelCase )
312
1
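A minimal usage sketch for the fast ELECTRA tokenizer defined in the sample above, assuming the transformers package and hub access; the checkpoint ID is one of the real google/electra-* names from the sample's vocab map:

from transformers import ElectraTokenizerFast

# Load the Rust-backed tokenizer for a small ELECTRA checkpoint.
tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")

# Encoding a sentence pair adds [CLS]/[SEP] and produces token_type_ids:
# 0 for the first segment (including its special tokens), 1 for the second.
enc = tok("first sentence", "second sentence")
print(enc["input_ids"])
print(enc["token_type_ids"])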
import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow __a :str = logging.getLogger() @unittest.skip('Temporarily disable the doc tests.' ) @require_torch @require_tf @slow class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : str , UpperCAmelCase : Path , UpperCAmelCase : Union[str, None] = None , UpperCAmelCase : Union[List[str], None] = None , UpperCAmelCase : Union[str, List[str], None] = None , UpperCAmelCase : bool = True , ): A_ = [file for file in os.listdir(UpperCAmelCase ) if os.path.isfile(os.path.join(UpperCAmelCase , UpperCAmelCase ) )] if identifier is not None: A_ = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(UpperCAmelCase , UpperCAmelCase ): for n_ in n_identifier: A_ = [file for file in files if n_ not in file] else: A_ = [file for file in files if n_identifier not in file] A_ = ignore_files or [] ignore_files.append("__init__.py" ) A_ = [file for file in files if file not in ignore_files] for file in files: # Open all files print("Testing" , UpperCAmelCase ) if only_modules: A_ = file.split("." )[0] try: A_ = getattr(UpperCAmelCase , UpperCAmelCase ) A_ = doctest.DocTestSuite(UpperCAmelCase ) A_ = unittest.TextTestRunner().run(UpperCAmelCase ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f'''{module_identifier} is not a module.''' ) else: A_ = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def __A ( self : List[str] ): A_ = Path("src/transformers" ) A_ = "modeling" A_ = [ "modeling_ctrl.py", "modeling_tf_ctrl.py", ] self.analyze_directory(UpperCAmelCase , identifier=UpperCAmelCase , ignore_files=UpperCAmelCase ) def __A ( self : Dict ): A_ = Path("src/transformers" ) A_ = "tokenization" self.analyze_directory(UpperCAmelCase , identifier=UpperCAmelCase ) def __A ( self : Any ): A_ = Path("src/transformers" ) A_ = "configuration" self.analyze_directory(UpperCAmelCase , identifier=UpperCAmelCase ) def __A ( self : Optional[int] ): A_ = Path("src/transformers" ) A_ = ["configuration", "modeling", "tokenization"] self.analyze_directory(UpperCAmelCase , n_identifier=UpperCAmelCase ) def __A ( self : Optional[int] ): A_ = Path("docs/source" ) A_ = ["favicon.ico"] self.analyze_directory(UpperCAmelCase , ignore_files=UpperCAmelCase , only_modules=UpperCAmelCase )
312
# flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter __a :Optional[Any] = logging.get_logger(__name__) __a :Dict[Optional[str], Type[Formatter]] = {} __a :Dict[Optional[str], str] = {} __a :Dict[Optional[str], Exception] = {} def __snake_case ( __UpperCamelCase : type ,__UpperCamelCase : Optional[str] ,__UpperCamelCase : Optional[List[str]] = None ,): """simple docstring""" A_ = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' ) A_ = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' ) A_ = format_type def __snake_case ( __UpperCamelCase : Exception ,__UpperCamelCase : Optional[str] ,__UpperCamelCase : Optional[List[str]] = None ): """simple docstring""" A_ = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): A_ = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['python']) _register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow']) _register_formatter(NumpyFormatter, 'numpy', aliases=['np']) _register_formatter(PandasFormatter, 'pandas', aliases=['pd']) _register_formatter(CustomFormatter, 'custom') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch']) else: __a :List[Any] = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.') _register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, 'tensorflow', aliases=['tf']) else: __a :List[str] = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.') _register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, 'jax', aliases=[]) else: __a :Tuple = ValueError('JAX needs to be installed to be able to return JAX arrays.') _register_unavailable_formatter(_jax_error, 'jax', aliases=[]) def __snake_case ( __UpperCamelCase : Optional[str] ): """simple docstring""" if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def __snake_case ( __UpperCamelCase : Optional[str] ,**__UpperCamelCase : List[Any] ): """simple docstring""" A_ = get_format_type_from_alias(__UpperCamelCase ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**__UpperCamelCase ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
312
1
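The formatter registry in the sample above maps a format type and its aliases to a formatter class, and records an error for backends that are not installed. A self-contained sketch of the same registration pattern, with hypothetical names rather than the real datasets API:

from typing import Dict, Type

_FORMATTERS: Dict[str, Type] = {}
_ALIASES: Dict[str, str] = {}
_UNAVAILABLE: Dict[str, Exception] = {}

def register_formatter(cls, name, aliases=()):
    # Canonical name -> class; every alias (and the name itself) -> canonical name.
    _FORMATTERS[name] = cls
    for alias in set(aliases) | {name}:
        _ALIASES[alias] = name

def register_unavailable(err, name, aliases=()):
    for alias in set(aliases) | {name}:
        _UNAVAILABLE[alias] = err

def get_formatter(name):
    name = _ALIASES.get(name, name)
    if name in _FORMATTERS:
        return _FORMATTERS[name]()
    if name in _UNAVAILABLE:
        raise _UNAVAILABLE[name]  # e.g. "PyTorch needs to be installed ..."
    raise ValueError(f"unknown format type {name!r}")

class NumpyFormatter:
    pass

register_formatter(NumpyFormatter, "numpy", aliases=["np"])
assert isinstance(get_formatter("np"), NumpyFormatter)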
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable __a :Optional[int] = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :List[Any] = ['DPTFeatureExtractor'] __a :int = ['DPTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :List[Any] = [ 'DPT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DPTForDepthEstimation', 'DPTForSemanticSegmentation', 'DPTModel', 'DPTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys __a :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
312
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __a :int = { 'configuration_mask2former': [ 'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Mask2FormerConfig', ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Union[str, Any] = ['Mask2FormerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Optional[Any] = [ 'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'Mask2FormerForUniversalSegmentation', 'Mask2FormerModel', 'Mask2FormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_maskaformer import MaskaFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskaformer import ( MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskaFormerForUniversalSegmentation, MaskaFormerModel, MaskaFormerPreTrainedModel, ) else: import sys __a :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
312
1
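Both __init__ samples above follow the same lazy-import pattern: at type-checking time the names are imported normally, while at runtime the module is replaced by a proxy that imports submodules only on first attribute access. A stripped-down sketch of such a proxy (not the actual transformers._LazyModule implementation, and it assumes the proxy stands in for a real package so relative imports resolve):

import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve attributes to submodule imports on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name to the submodule that defines it.
        self._name_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value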
import warnings from ...utils import logging from .image_processing_glpn import GLPNImageProcessor __a :str = logging.get_logger(__name__) class _a ( snake_case_ ): """simple docstring""" def __init__( self : List[str] , *UpperCAmelCase : Any , **UpperCAmelCase : List[Any] ): warnings.warn( "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use GLPNImageProcessor instead." , UpperCAmelCase , ) super().__init__(*UpperCAmelCase , **UpperCAmelCase )
312
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, ClassLabel, Features from .base import TaskTemplate @dataclass(frozen=snake_case_ ) class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} ) _lowerCamelCase : ClassVar[Features] = Features({'audio': Audio()} ) _lowerCamelCase : ClassVar[Features] = Features({'labels': ClassLabel} ) _lowerCamelCase : str = "audio" _lowerCamelCase : str = "labels" def __A ( self : str , UpperCAmelCase : List[Any] ): if self.label_column not in features: raise ValueError(f'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , UpperCAmelCase ): raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' ) A_ = copy.deepcopy(self ) A_ = self.label_schema.copy() A_ = features[self.label_column] A_ = label_schema return task_template @property def __A ( self : List[str] ): return { self.audio_column: "audio", self.label_column: "labels", }
312
1
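The GLPN sample above is the standard deprecation shim: a subclass that only emits a warning and defers everything to the replacement class. The same pattern in isolation, with placeholder names:

import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewProcessor):
    """Deprecated alias kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

old = OldFeatureExtractor(size=384)  # still works, but warns
assert old.size == 384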
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __a :List[str] = { 'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'], 'tokenization_lxmert': ['LxmertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Union[str, Any] = ['LxmertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :str = [ 'LxmertEncoder', 'LxmertForPreTraining', 'LxmertForQuestionAnswering', 'LxmertModel', 'LxmertPreTrainedModel', 'LxmertVisualFeatureEncoder', 'LxmertXLayer', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Tuple = [ 'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLxmertForPreTraining', 'TFLxmertMainLayer', 'TFLxmertModel', 'TFLxmertPreTrainedModel', 'TFLxmertVisualFeatureEncoder', ] if TYPE_CHECKING: from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_lxmert_fast import LxmertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_lxmert import ( TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) else: import sys __a :List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
312
def __snake_case ( __UpperCamelCase : bytes ): """simple docstring""" return "".join([hex(__UpperCamelCase )[2:].zfill(2 ).upper() for byte in list(__UpperCamelCase )] ) def __snake_case ( __UpperCamelCase : str ): """simple docstring""" if (len(__UpperCamelCase ) % 2) != 0: raise ValueError( "Base16 encoded data is invalid:\nData does not have an even number of hex digits." ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(__UpperCamelCase ) <= set("0123456789ABCDEF" ): raise ValueError( "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] ,16 ) for i in range(0 ,len(__UpperCamelCase ) ,2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
312
1
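A quick round trip through the base16 helpers above; since both functions in the sample carry the same obfuscated name __snake_case, the sketch below assumes they are called base16_encode and base16_decode:

def base16_encode(data: bytes) -> str:
    # Each byte becomes two uppercase hex digits (RFC 3548 section 6).
    return "".join(hex(byte)[2:].zfill(2).upper() for byte in data)

def base16_decode(data: str) -> bytes:
    if len(data) % 2 != 0:
        raise ValueError("Base16 encoded data must have an even number of hex digits.")
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError("Base16 encoded data must be uppercase hex.")
    return bytes(int(data[i : i + 2], 16) for i in range(0, len(data), 2))

assert base16_encode(b"Hello") == "48656C6C6F"
assert base16_decode("48656C6C6F") == b"Hello"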
import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def __snake_case ( __UpperCamelCase : Dict ): """simple docstring""" if "img_encoder.pos_embed" in name: A_ = name.replace("img_encoder.pos_embed" ,"vision_model.embeddings.position_embeddings" ) if "img_encoder.patch_embed.proj" in name: A_ = name.replace("img_encoder.patch_embed.proj" ,"vision_model.embeddings.patch_embeddings.projection" ) if "img_encoder.patch_embed.norm" in name: A_ = name.replace("img_encoder.patch_embed.norm" ,"vision_model.embeddings.layernorm" ) if "img_encoder.layers" in name: A_ = name.replace("img_encoder.layers" ,"vision_model.encoder.stages" ) if "blocks" in name and "res" not in name: A_ = name.replace("blocks" ,"layers" ) if "attn" in name and "pre_assign" not in name: A_ = name.replace("attn" ,"self_attn" ) if "proj" in name and "self_attn" in name and "text" not in name: A_ = name.replace("proj" ,"out_proj" ) if "pre_assign_attn.attn.proj" in name: A_ = name.replace("pre_assign_attn.attn.proj" ,"pre_assign_attn.attn.out_proj" ) if "norm1" in name: A_ = name.replace("norm1" ,"layer_norm1" ) if "norm2" in name and "pre_assign" not in name: A_ = name.replace("norm2" ,"layer_norm2" ) if "img_encoder.norm" in name: A_ = name.replace("img_encoder.norm" ,"vision_model.layernorm" ) # text encoder if "text_encoder.token_embedding" in name: A_ = name.replace("text_encoder.token_embedding" ,"text_model.embeddings.token_embedding" ) if "text_encoder.positional_embedding" in name: A_ = name.replace("text_encoder.positional_embedding" ,"text_model.embeddings.position_embedding.weight" ) if "text_encoder.transformer.resblocks." in name: A_ = name.replace("text_encoder.transformer.resblocks." ,"text_model.encoder.layers." ) if "ln_1" in name: A_ = name.replace("ln_1" ,"layer_norm1" ) if "ln_2" in name: A_ = name.replace("ln_2" ,"layer_norm2" ) if "c_fc" in name: A_ = name.replace("c_fc" ,"fc1" ) if "c_proj" in name: A_ = name.replace("c_proj" ,"fc2" ) if "text_encoder" in name: A_ = name.replace("text_encoder" ,"text_model" ) if "ln_final" in name: A_ = name.replace("ln_final" ,"final_layer_norm" ) # projection layers if "img_projector.linear_hidden." in name: A_ = name.replace("img_projector.linear_hidden." ,"visual_projection." ) if "img_projector.linear_out." in name: A_ = name.replace("img_projector.linear_out." ,"visual_projection.3." ) if "text_projector.linear_hidden" in name: A_ = name.replace("text_projector.linear_hidden" ,"text_projection" ) if "text_projector.linear_out" in name: A_ = name.replace("text_projector.linear_out" ,"text_projection.3" ) return name def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : List[str] ): """simple docstring""" for key in orig_state_dict.copy().keys(): A_ = orig_state_dict.pop(__UpperCamelCase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors A_ = key.split("." ) A_ , A_ = int(key_split[2] ), int(key_split[4] ) A_ = config.vision_config.hidden_size if "weight" in key: A_ = val[:dim, :] A_ = val[dim : dim * 2, :] A_ = val[-dim:, :] else: A_ = val[:dim] A_ = val[dim : dim * 2] A_ = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors A_ = key.split("." 
) A_ = int(key_split[3] ) A_ = config.text_config.hidden_size if "weight" in key: A_ = val[:dim, :] A_ = val[ dim : dim * 2, : ] A_ = val[-dim:, :] else: A_ = val[:dim] A_ = val[dim : dim * 2] A_ = val[-dim:] else: A_ = rename_key(__UpperCamelCase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): A_ = val.squeeze_() else: A_ = val return orig_state_dict def __snake_case ( ): """simple docstring""" A_ = "http://images.cocodataset.org/val2017/000000039769.jpg" A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : str="groupvit-gcc-yfcc" ,__UpperCamelCase : List[str]=False ): """simple docstring""" A_ = GroupViTConfig() A_ = GroupViTModel(__UpperCamelCase ).eval() A_ = torch.load(__UpperCamelCase ,map_location="cpu" )["model"] A_ = convert_state_dict(__UpperCamelCase ,__UpperCamelCase ) A_ , A_ = model.load_state_dict(__UpperCamelCase ,strict=__UpperCamelCase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(__UpperCamelCase ) == 0) # verify result A_ = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" ) A_ = prepare_img() A_ = processor(text=["a photo of a cat", "a photo of a dog"] ,images=__UpperCamelCase ,padding=__UpperCamelCase ,return_tensors="pt" ) with torch.no_grad(): A_ = model(**__UpperCamelCase ) if model_name == "groupvit-gcc-yfcc": A_ = torch.tensor([[13.3523, 6.3629]] ) elif model_name == "groupvit-gcc-redcaps": A_ = torch.tensor([[16.1873, 8.6230]] ) else: raise ValueError(f'''Model name {model_name} not supported.''' ) assert torch.allclose(outputs.logits_per_image ,__UpperCamelCase ,atol=1E-3 ) processor.save_pretrained(__UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) print("Successfully saved processor and model to" ,__UpperCamelCase ) if push_to_hub: print("Pushing to the hub..." ) processor.push_to_hub(__UpperCamelCase ,organization="nielsr" ) model.push_to_hub(__UpperCamelCase ,organization="nielsr" ) if __name__ == "__main__": __a :Optional[int] = argparse.ArgumentParser() parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.' ) parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint') parser.add_argument( '--model_name', default='groupvit-gccy-fcc', type=str, help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.', ) __a :str = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
312
import cva import numpy as np class _a : """simple docstring""" def __init__( self : Any , UpperCAmelCase : float , UpperCAmelCase : int ): if k in (0.04, 0.06): A_ = k A_ = window_size else: raise ValueError("invalid k value" ) def __str__( self : Optional[Any] ): return str(self.k ) def __A ( self : int , UpperCAmelCase : str ): A_ = cva.imread(UpperCAmelCase , 0 ) A_ , A_ = img.shape A_ = [] A_ = img.copy() A_ = cva.cvtColor(UpperCAmelCase , cva.COLOR_GRAY2RGB ) A_ , A_ = np.gradient(UpperCAmelCase ) A_ = dx**2 A_ = dy**2 A_ = dx * dy A_ = 0.04 A_ = self.window_size // 2 for y in range(UpperCAmelCase , h - offset ): for x in range(UpperCAmelCase , w - offset ): A_ = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() A_ = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() A_ = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() A_ = (wxx * wyy) - (wxy**2) A_ = wxx + wyy A_ = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 255 ) return color_img, corner_list if __name__ == "__main__": __a :List[str] = HarrisCorner(0.04, 3) __a , __a :str = edge_detect.detect('path_to_image') cva.imwrite('detect.png', color_img)
312
1
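The GroupViT conversion script above repeatedly splits a fused query/key/value projection into three separate matrices. The slicing logic, shown on random data (a sketch with a made-up hidden size, not tied to the real checkpoint shapes):

import torch

dim = 8  # hypothetical hidden size
in_proj_weight = torch.randn(3 * dim, dim)  # fused [q; k; v] projection
in_proj_bias = torch.randn(3 * dim)

# Rows 0:dim are the query projection, dim:2*dim the key, 2*dim:3*dim the value.
q_w = in_proj_weight[:dim, :]
k_w = in_proj_weight[dim : 2 * dim, :]
v_w = in_proj_weight[-dim:, :]
q_b, k_b, v_b = in_proj_bias[:dim], in_proj_bias[dim : 2 * dim], in_proj_bias[-dim:]

# Re-stacking the pieces recovers the fused tensors exactly.
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)
assert torch.equal(torch.cat([q_b, k_b, v_b], dim=0), in_proj_bias)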
from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging __a :int = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class _a ( snake_case_ ): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : int = 101 ): A_ = length def __len__( self : int ): return self.length def __getitem__( self : Optional[int] , UpperCAmelCase : Optional[int] ): return i class _a : """simple docstring""" def __call__( self : Any , UpperCAmelCase : Optional[Any] ): return {"input_ids": torch.tensor(UpperCAmelCase ), "labels": torch.tensor(UpperCAmelCase )} class _a ( nn.Module ): """simple docstring""" def __init__( self : int ): super().__init__() # Add some (unused) params otherwise DDP will complain. A_ = nn.Linear(120 , 80 ) def __A ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Tuple=None ): if labels is not None: return torch.tensor(0.0 , device=input_ids.device ), input_ids else: return input_ids class _a ( snake_case_ ): """simple docstring""" @require_torch_neuroncore def __A ( self : List[str] ): A_ = f'''--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py '''.split() A_ = self.get_auto_remove_tmp_dir() A_ = f'''--output_dir {output_dir}'''.split() A_ = ["torchrun"] + distributed_args + args execute_subprocess_async(UpperCAmelCase , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call class _a ( snake_case_ ): """simple docstring""" @require_torch_multi_gpu def __A ( self : List[str] ): A_ = f'''--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py '''.split() A_ = self.get_auto_remove_tmp_dir() A_ = f'''--output_dir {output_dir}'''.split() A_ = ["torchrun"] + distributed_args + args execute_subprocess_async(UpperCAmelCase , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py __a :Union[str, Any] = HfArgumentParser((TrainingArguments,)) __a :Tuple = parser.parse_args_into_dataclasses()[0] logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " F"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}" ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. 
(this is crucial for prediction for instance) for dataset_length in [101, 40, 7]: __a :int = DummyDataset(dataset_length) def __snake_case ( __UpperCamelCase : EvalPrediction ): """simple docstring""" A_ = list(range(len(__UpperCamelCase ) ) ) A_ = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( "Predictions and/or labels do not match expected results:\n - predictions: " f'''{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}''' ) return {"success": success} __a :str = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) __a :str = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) __a :str = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) __a :Optional[int] = 2 __a :List[Any] = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) __a :str = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) __a :Union[str, Any] = None
312
def __snake_case ( __UpperCamelCase : int = 1000 ): """simple docstring""" return sum(2 * a * ((a - 1) // 2) for a in range(3 ,__UpperCamelCase + 1 ) ) if __name__ == "__main__": print(__snake_case())
312
1
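The one-line solution above folds a loop into a generator expression. A sketch spelling out the same computation, assuming the fixed function is named solution:

def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))

def solution_loop(n: int = 1000) -> int:
    # Same sum, written as an explicit accumulation for readability.
    total = 0
    for a in range(3, n + 1):
        total += 2 * a * ((a - 1) // 2)
    return total

assert solution(10) == solution_loop(10)
assert solution() == solution_loop()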
import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __a :List[str] = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class _a ( snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : Optional[Any] = XLNetTokenizer _lowerCamelCase : List[str] = XLNetTokenizerFast _lowerCamelCase : Optional[int] = True _lowerCamelCase : List[str] = True def __A ( self : Optional[Any] ): super().setUp() # We have a SentencePiece fixture for testing A_ = XLNetTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def __A ( self : Tuple ): A_ = "<s>" A_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase ) def __A ( self : List[str] ): A_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "<eod>" ) self.assertEqual(len(UpperCAmelCase ) , 1006 ) def __A ( self : List[str] ): self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def __A ( self : Tuple ): A_ = XLNetTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase ) A_ = tokenizer.tokenize("This is a test" ) self.assertListEqual(UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [285, 46, 10, 170, 382] ) A_ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( UpperCAmelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) A_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) A_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase ) self.assertListEqual( UpperCAmelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) def __A ( self : Dict ): A_ = XLNetTokenizer(UpperCAmelCase , do_lower_case=UpperCAmelCase ) A_ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( UpperCAmelCase , [ SPIECE_UNDERLINE + "", "i", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "se", ".", ] , ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] ) def __A ( self : List[Any] ): A_ = XLNetTokenizer(UpperCAmelCase , do_lower_case=UpperCAmelCase ) A_ = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( UpperCAmelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "se", ".", ] , ) @slow def __A ( self : Union[str, Any] ): A_ = XLNetTokenizer.from_pretrained("xlnet-base-cased" ) A_ = tokenizer.encode("sequence builders" , add_special_tokens=UpperCAmelCase ) A_ = tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCAmelCase ) A_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase ) A_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def __A ( self : Union[str, Any] ): # fmt: off A_ = {"input_ids": [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , )
312
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Union[str, Any] = ['image_processor', 'tokenizer'] _lowerCamelCase : Tuple = 'OwlViTImageProcessor' _lowerCamelCase : List[Any] = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self : Optional[Any] , UpperCAmelCase : int=None , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : Any ): A_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCAmelCase , ) A_ = kwargs.pop("feature_extractor" ) A_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(UpperCAmelCase , UpperCAmelCase ) def __call__( self : Optional[int] , UpperCAmelCase : List[str]=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Dict="max_length" , UpperCAmelCase : Optional[Any]="np" , **UpperCAmelCase : Optional[int] ): if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." ) if text is not None: if isinstance(UpperCAmelCase , UpperCAmelCase ) or (isinstance(UpperCAmelCase , UpperCAmelCase ) and not isinstance(text[0] , UpperCAmelCase )): A_ = [self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )] elif isinstance(UpperCAmelCase , UpperCAmelCase ) and isinstance(text[0] , UpperCAmelCase ): A_ = [] # Maximum number of queries across batch A_ = max([len(UpperCAmelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(UpperCAmelCase ) != max_num_queries: A_ = t + [" "] * (max_num_queries - len(UpperCAmelCase )) A_ = self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ) encodings.append(UpperCAmelCase ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": A_ = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) A_ = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp A_ = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) A_ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch A_ = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 ) A_ = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf A_ = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 ) A_ = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 ) else: raise ValueError("Target return tensor type could not be returned" ) A_ = BatchEncoding() A_ = input_ids A_ = attention_mask if query_images is not None: A_ = BatchEncoding() A_ = self.image_processor( 
UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ).pixel_values A_ = query_pixel_values if images is not None: A_ = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ) if text is not None and images is not None: A_ = image_features.pixel_values return encoding elif query_images is not None and images is not None: A_ = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase ) def __A ( self : Optional[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : List[Any] ): return self.image_processor.post_process(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : str , *UpperCAmelCase : str , **UpperCAmelCase : Union[str, Any] ): return self.image_processor.post_process_object_detection(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : List[Any] , *UpperCAmelCase : int , **UpperCAmelCase : int ): return self.image_processor.post_process_image_guided_detection(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : List[Any] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Any ): return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : Tuple , *UpperCAmelCase : Dict , **UpperCAmelCase : str ): return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase ) @property def __A ( self : Union[str, Any] ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase , ) return self.image_processor_class @property def __A ( self : Optional[Any] ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase , ) return self.image_processor
312
1
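Inside the OwlViT processor above, nested text queries are padded so every image in the batch has the same number of queries before tokenization. The padding step in isolation, as a small sketch:

def pad_queries(text_batches):
    """Pad each list of query strings to the longest list with ' ' placeholders."""
    max_num_queries = max(len(queries) for queries in text_batches)
    return [
        queries + [" "] * (max_num_queries - len(queries))
        for queries in text_batches
    ]

batches = [["a photo of a cat"], ["a photo of a dog", "a photo of a bird"]]
padded = pad_queries(batches)
assert all(len(q) == 2 for q in padded)
assert padded[0] == ["a photo of a cat", " "]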
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import numpy as np from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt') __a :Any = logging.getLogger(__name__) @dataclass class _a : """simple docstring""" _lowerCamelCase : Optional[int] = field( default=1_2_8 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _lowerCamelCase : bool = field( default=snake_case_ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} ) _lowerCamelCase : bool = field( default=snake_case_ , metadata={ 'help': ( 'Whether to pad all samples to `max_seq_length`. ' 'If False, will pad the samples dynamically when batching to the maximum length in the batch.' ) } , ) _lowerCamelCase : Optional[int] = field( default=snake_case_ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) _lowerCamelCase : Optional[int] = field( default=snake_case_ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) _lowerCamelCase : Optional[int] = field( default=snake_case_ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of prediction examples to this ' 'value if set.' ) } , ) @dataclass class _a : """simple docstring""" _lowerCamelCase : str = field( default=snake_case_ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _lowerCamelCase : str = field( default=snake_case_ , metadata={'help': 'Evaluation language. 
Also train language if `train_language` is set to None.'} ) _lowerCamelCase : Optional[str] = field( default=snake_case_ , metadata={'help': 'Train language if it is different from the evaluation language.'} ) _lowerCamelCase : Optional[str] = field( default=snake_case_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _lowerCamelCase : Optional[str] = field( default=snake_case_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _lowerCamelCase : Optional[str] = field( default=snake_case_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) _lowerCamelCase : Optional[bool] = field( default=snake_case_ , metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'} , ) _lowerCamelCase : bool = field( default=snake_case_ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , ) _lowerCamelCase : str = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) _lowerCamelCase : bool = field( default=snake_case_ , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) _lowerCamelCase : bool = field( default=snake_case_ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , ) def __snake_case ( ): """simple docstring""" A_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) A_ , A_ , A_ = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_xnli" ,__UpperCamelCase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,handlers=[logging.StreamHandler(sys.stdout )] ,) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() A_ = training_args.get_process_log_level() logger.setLevel(__UpperCamelCase ) datasets.utils.logging.set_verbosity(__UpperCamelCase ) transformers.utils.logging.set_verbosity(__UpperCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. A_ = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: A_ = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # Downloading and loading xnli dataset from the hub. if training_args.do_train: if model_args.train_language is None: A_ = load_dataset( "xnli" ,model_args.language ,split="train" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,) else: A_ = load_dataset( "xnli" ,model_args.train_language ,split="train" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,) A_ = train_dataset.features["label"].names if training_args.do_eval: A_ = load_dataset( "xnli" ,model_args.language ,split="validation" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,) A_ = eval_dataset.features["label"].names if training_args.do_predict: A_ = load_dataset( "xnli" ,model_args.language ,split="test" ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,) A_ = predict_dataset.features["label"].names # Labels A_ = len(__UpperCamelCase ) # Load pretrained model and tokenizer # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. A_ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=__UpperCamelCase ,idalabel={str(__UpperCamelCase ): label for i, label in enumerate(__UpperCamelCase )} ,labelaid={label: i for i, label in enumerate(__UpperCamelCase )} ,finetuning_task="xnli" ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,) A_ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,do_lower_case=model_args.do_lower_case ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,) A_ = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path ,from_tf=bool(".ckpt" in model_args.model_name_or_path ) ,config=__UpperCamelCase ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,) # Preprocessing the datasets # Padding strategy if data_args.pad_to_max_length: A_ = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch A_ = False def preprocess_function(__UpperCamelCase : List[str] ): # Tokenize the texts return tokenizer( examples["premise"] ,examples["hypothesis"] ,padding=__UpperCamelCase ,max_length=data_args.max_seq_length ,truncation=__UpperCamelCase ,) if training_args.do_train: if data_args.max_train_samples is not None: A_ = min(len(__UpperCamelCase ) ,data_args.max_train_samples ) A_ = train_dataset.select(range(__UpperCamelCase ) ) with training_args.main_process_first(desc="train dataset map pre-processing" ): A_ = train_dataset.map( __UpperCamelCase ,batched=__UpperCamelCase ,load_from_cache_file=not data_args.overwrite_cache ,desc="Running tokenizer on train dataset" ,) # Log a few random samples from the training set: for 
index in random.sample(range(len(__UpperCamelCase ) ) ,3 ): logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' ) if training_args.do_eval: if data_args.max_eval_samples is not None: A_ = min(len(__UpperCamelCase ) ,data_args.max_eval_samples ) A_ = eval_dataset.select(range(__UpperCamelCase ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): A_ = eval_dataset.map( __UpperCamelCase ,batched=__UpperCamelCase ,load_from_cache_file=not data_args.overwrite_cache ,desc="Running tokenizer on validation dataset" ,) if training_args.do_predict: if data_args.max_predict_samples is not None: A_ = min(len(__UpperCamelCase ) ,data_args.max_predict_samples ) A_ = predict_dataset.select(range(__UpperCamelCase ) ) with training_args.main_process_first(desc="prediction dataset map pre-processing" ): A_ = predict_dataset.map( __UpperCamelCase ,batched=__UpperCamelCase ,load_from_cache_file=not data_args.overwrite_cache ,desc="Running tokenizer on prediction dataset" ,) # Get the metric function A_ = evaluate.load("xnli" ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(__UpperCamelCase : EvalPrediction ): A_ = p.predictions[0] if isinstance(p.predictions ,__UpperCamelCase ) else p.predictions A_ = np.argmax(__UpperCamelCase ,axis=1 ) return metric.compute(predictions=__UpperCamelCase ,references=p.label_ids ) # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: A_ = default_data_collator elif training_args.fpaa: A_ = DataCollatorWithPadding(__UpperCamelCase ,pad_to_multiple_of=8 ) else: A_ = None # Initialize our Trainer A_ = Trainer( model=__UpperCamelCase ,args=__UpperCamelCase ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=__UpperCamelCase ,tokenizer=__UpperCamelCase ,data_collator=__UpperCamelCase ,) # Training if training_args.do_train: A_ = None if training_args.resume_from_checkpoint is not None: A_ = training_args.resume_from_checkpoint elif last_checkpoint is not None: A_ = last_checkpoint A_ = trainer.train(resume_from_checkpoint=__UpperCamelCase ) A_ = train_result.metrics A_ = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCamelCase ) ) A_ = min(__UpperCamelCase ,len(__UpperCamelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train" ,__UpperCamelCase ) trainer.save_metrics("train" ,__UpperCamelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) A_ = trainer.evaluate(eval_dataset=__UpperCamelCase ) A_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCamelCase ) A_ = min(__UpperCamelCase ,len(__UpperCamelCase ) ) trainer.log_metrics("eval" ,__UpperCamelCase ) trainer.save_metrics("eval" ,__UpperCamelCase ) # Prediction if training_args.do_predict: logger.info("*** Predict ***" ) A_ , A_ , A_ = trainer.predict(__UpperCamelCase ,metric_key_prefix="predict" ) A_ = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(__UpperCamelCase ) ) A_ = min(__UpperCamelCase ,len(__UpperCamelCase ) ) trainer.log_metrics("predict" ,__UpperCamelCase ) trainer.save_metrics("predict" ,__UpperCamelCase ) A_ = 
np.argmax(__UpperCamelCase ,axis=1 ) A_ = os.path.join(training_args.output_dir ,"predictions.txt" ) if trainer.is_world_process_zero(): with open(__UpperCamelCase ,"w" ) as writer: writer.write("index\tprediction\n" ) for index, item in enumerate(__UpperCamelCase ): A_ = label_list[item] writer.write(f'''{index}\t{item}\n''' ) if __name__ == "__main__": main()
312
from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class _a ( snake_case_ , snake_case_ ): """simple docstring""" @register_to_config def __init__( self : Dict , UpperCAmelCase : int = 768 , ): super().__init__() A_ = nn.Parameter(torch.zeros(1 , UpperCAmelCase ) ) A_ = nn.Parameter(torch.ones(1 , UpperCAmelCase ) ) def __A ( self : str , UpperCAmelCase : Optional[Union[str, torch.device]] = None , UpperCAmelCase : Optional[torch.dtype] = None , ): A_ = nn.Parameter(self.mean.to(UpperCAmelCase ).to(UpperCAmelCase ) ) A_ = nn.Parameter(self.std.to(UpperCAmelCase ).to(UpperCAmelCase ) ) return self def __A ( self : Dict , UpperCAmelCase : List[Any] ): A_ = (embeds - self.mean) * 1.0 / self.std return embeds def __A ( self : int , UpperCAmelCase : int ): A_ = (embeds * self.std) + self.mean return embeds
312
1
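The small module above stores a learned mean and standard deviation and applies them in both directions. A plain-tensor sketch of the round trip (hypothetical values, none of the ConfigMixin machinery):

import torch

mean = torch.zeros(1, 4)
std = torch.ones(1, 4) * 2.0

def scale(embeds):
    # Standardize: subtract the mean, divide by the standard deviation.
    return (embeds - mean) / std

def unscale(embeds):
    # Invert the standardization.
    return embeds * std + mean

x = torch.randn(3, 4)
assert torch.allclose(unscale(scale(x)), x)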
from math import pi, sqrt, tan def __snake_case ( __UpperCamelCase : float ): """simple docstring""" if side_length < 0: raise ValueError("surface_area_cube() only accepts non-negative values" ) return 6 * side_length**2 def __snake_case ( __UpperCamelCase : float ,__UpperCamelCase : float ,__UpperCamelCase : float ): """simple docstring""" if length < 0 or breadth < 0 or height < 0: raise ValueError("surface_area_cuboid() only accepts non-negative values" ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def __snake_case ( __UpperCamelCase : float ): """simple docstring""" if radius < 0: raise ValueError("surface_area_sphere() only accepts non-negative values" ) return 4 * pi * radius**2 def __snake_case ( __UpperCamelCase : float ): """simple docstring""" if radius < 0: raise ValueError("surface_area_hemisphere() only accepts non-negative values" ) return 3 * pi * radius**2 def __snake_case ( __UpperCamelCase : float ,__UpperCamelCase : float ): """simple docstring""" if radius < 0 or height < 0: raise ValueError("surface_area_cone() only accepts non-negative values" ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def __snake_case ( __UpperCamelCase : float ,__UpperCamelCase : float ,__UpperCamelCase : float ): """simple docstring""" if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( "surface_area_conical_frustum() only accepts non-negative values" ) A_ = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def __snake_case ( __UpperCamelCase : float ,__UpperCamelCase : float ): """simple docstring""" if radius < 0 or height < 0: raise ValueError("surface_area_cylinder() only accepts non-negative values" ) return 2 * pi * radius * (height + radius) def __snake_case ( __UpperCamelCase : float ,__UpperCamelCase : float ): """simple docstring""" if torus_radius < 0 or tube_radius < 0: raise ValueError("surface_area_torus() only accepts non-negative values" ) if torus_radius < tube_radius: raise ValueError( "surface_area_torus() does not support spindle or self intersecting tori" ) return 4 * pow(__UpperCamelCase ,2 ) * torus_radius * tube_radius def __snake_case ( __UpperCamelCase : float ,__UpperCamelCase : float ): """simple docstring""" if length < 0 or width < 0: raise ValueError("area_rectangle() only accepts non-negative values" ) return length * width def __snake_case ( __UpperCamelCase : float ): """simple docstring""" if side_length < 0: raise ValueError("area_square() only accepts non-negative values" ) return side_length**2 def __snake_case ( __UpperCamelCase : float ,__UpperCamelCase : float ): """simple docstring""" if base < 0 or height < 0: raise ValueError("area_triangle() only accepts non-negative values" ) return (base * height) / 2 def __snake_case ( __UpperCamelCase : float ,__UpperCamelCase : float ,__UpperCamelCase : float ): """simple docstring""" if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError("area_triangle_three_sides() only accepts non-negative values" ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError("Given three sides do not form a triangle" ) A_ = (sidea + sidea + sidea) / 2 A_ = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def __snake_case ( __UpperCamelCase : float ,__UpperCamelCase : float ): """simple docstring""" if base < 0 or height < 0: raise ValueError("area_parallelogram() only accepts 
non-negative values" ) return base * height def __snake_case ( __UpperCamelCase : float ,__UpperCamelCase : float ,__UpperCamelCase : float ): """simple docstring""" if basea < 0 or basea < 0 or height < 0: raise ValueError("area_trapezium() only accepts non-negative values" ) return 1 / 2 * (basea + basea) * height def __snake_case ( __UpperCamelCase : float ): """simple docstring""" if radius < 0: raise ValueError("area_circle() only accepts non-negative values" ) return pi * radius**2 def __snake_case ( __UpperCamelCase : float ,__UpperCamelCase : float ): """simple docstring""" if radius_x < 0 or radius_y < 0: raise ValueError("area_ellipse() only accepts non-negative values" ) return pi * radius_x * radius_y def __snake_case ( __UpperCamelCase : float ,__UpperCamelCase : float ): """simple docstring""" if diagonal_a < 0 or diagonal_a < 0: raise ValueError("area_rhombus() only accepts non-negative values" ) return 1 / 2 * diagonal_a * diagonal_a def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : float ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or sides < 3: raise ValueError( "area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides" ) elif length < 0: raise ValueError( "area_reg_polygon() only accepts non-negative values as \ length of a side" ) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print('[DEMO] Areas of various geometric shapes: \n') print(F"Rectangle: {area_rectangle(10, 20) = }") print(F"Square: {area_square(10) = }") print(F"Triangle: {area_triangle(10, 10) = }") print(F"Triangle: {area_triangle_three_sides(5, 12, 13) = }") print(F"Parallelogram: {area_parallelogram(10, 20) = }") print(F"Rhombus: {area_rhombus(10, 20) = }") print(F"Trapezium: {area_trapezium(10, 20, 30) = }") print(F"Circle: {area_circle(20) = }") print(F"Ellipse: {area_ellipse(10, 20) = }") print('\nSurface Areas of various geometric shapes: \n') print(F"Cube: {surface_area_cube(20) = }") print(F"Cuboid: {surface_area_cuboid(10, 20, 30) = }") print(F"Sphere: {surface_area_sphere(20) = }") print(F"Hemisphere: {surface_area_hemisphere(20) = }") print(F"Cone: {surface_area_cone(10, 20) = }") print(F"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }") print(F"Cylinder: {surface_area_cylinder(10, 20) = }") print(F"Torus: {surface_area_torus(20, 10) = }") print(F"Equilateral Triangle: {area_reg_polygon(3, 10) = }") print(F"Square: {area_reg_polygon(4, 10) = }") print(F"Regular Pentagon: {area_reg_polygon(5, 10) = }")
from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def __snake_case ( __UpperCamelCase : NDArray[floataa] ,__UpperCamelCase : NDArray[floataa] ,__UpperCamelCase : list[int] ,__UpperCamelCase : int ,): """simple docstring""" A_ , A_ = coefficient_matrix.shape A_ , A_ = constant_matrix.shape if rowsa != colsa: A_ = f'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}''' raise ValueError(__UpperCamelCase ) if colsa != 1: A_ = f'''Constant matrix must be nx1 but received {rowsa}x{colsa}''' raise ValueError(__UpperCamelCase ) if rowsa != rowsa: A_ = ( "Coefficient and constant matrices dimensions must be nxn and nx1 but " f'''received {rowsa}x{colsa} and {rowsa}x{colsa}''' ) raise ValueError(__UpperCamelCase ) if len(__UpperCamelCase ) != rowsa: A_ = ( "Number of initial values must be equal to number of rows in coefficient " f'''matrix but received {len(__UpperCamelCase )} and {rowsa}''' ) raise ValueError(__UpperCamelCase ) if iterations <= 0: raise ValueError("Iterations must be at least 1" ) A_ = np.concatenate( (coefficient_matrix, constant_matrix) ,axis=1 ) A_ , A_ = table.shape strictly_diagonally_dominant(__UpperCamelCase ) # Iterates the whole matrix for given number of times for _ in range(__UpperCamelCase ): A_ = [] for row in range(__UpperCamelCase ): A_ = 0 for col in range(__UpperCamelCase ): if col == row: A_ = table[row][col] elif col == cols - 1: A_ = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] A_ = (temp + val) / denom new_val.append(__UpperCamelCase ) A_ = new_val return [float(__UpperCamelCase ) for i in new_val] def __snake_case ( __UpperCamelCase : NDArray[floataa] ): """simple docstring""" A_ , A_ = table.shape A_ = True for i in range(0 ,__UpperCamelCase ): A_ = 0 for j in range(0 ,cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError("Coefficient matrix is not strictly diagonally dominant" ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
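A usage sketch for the Jacobi solver above on a strictly diagonally dominant 2x2 system. `jacobi_iteration_method` is the assumed original name of the first function (the dump renames every def to `__snake_case`, so the two definitions shadow each other as written):

import numpy as np

# Solve 4x + y = 2, x + 3y = -6; the coefficient matrix is strictly
# diagonally dominant, so Jacobi iteration converges.
coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
constant = np.array([[2.0], [-6.0]])
init_val = [0.0, 0.0]

# jacobi_iteration_method is an assumed name, not defined under the dump's renaming.
solution = jacobi_iteration_method(coefficient, constant, init_val, iterations=50)
print(solution)  # approximately [1.0909, -2.3636]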
import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( "split_dict" ,[ SplitDict(), SplitDict({"train": SplitInfo(name="train" ,num_bytes=1337 ,num_examples=42 ,dataset_name="my_dataset" )} ), SplitDict({"train": SplitInfo(name="train" ,num_bytes=1337 ,num_examples=42 )} ), SplitDict({"train": SplitInfo()} ), ] ,) def __snake_case ( __UpperCamelCase : SplitDict ): """simple docstring""" A_ = split_dict._to_yaml_list() assert len(__UpperCamelCase ) == len(__UpperCamelCase ) A_ = SplitDict._from_yaml_list(__UpperCamelCase ) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump A_ = None # the split name of split_dict takes over the name of the split info object A_ = split_name assert split_dict == reloaded @pytest.mark.parametrize( "split_info" ,[SplitInfo(), SplitInfo(dataset_name=__UpperCamelCase ), SplitInfo(dataset_name="my_dataset" )] ) def __snake_case ( __UpperCamelCase : Optional[int] ): """simple docstring""" A_ = asdict(SplitDict({"train": split_info} ) ) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def __snake_case ( ): """simple docstring""" A_ = { "repo_name": ["test_repo1", "test_repo2", "test_repo3"], "path": ["test_1.py", "test_2.py", "unit_test.py"], "content": ["a " * 20, "a " * 30, "b " * 7], } A_ = Dataset.from_dict(__UpperCamelCase ) return dataset class _a ( snake_case_ ): """simple docstring""" def __A ( self : Union[str, Any] ): A_ = get_dataset() A_ = make_duplicate_clusters(UpperCAmelCase , 0.85 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def __A ( self : List[Any] ): A_ = get_dataset() A_ , A_ = deduplicate_dataset(UpperCAmelCase ) self.assertEqual(len(UpperCAmelCase ) , 2 ) print(UpperCAmelCase ) self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 ) self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , UpperCAmelCase )
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __a :Dict = {'configuration_mmbt': ['MMBTConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :List[Any] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings'] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys __a :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import os from typing import Dict, List, Tuple, TypeVar, Union __a :Any = TypeVar('T') __a :Union[str, Any] = Union[List[T], Tuple[T, ...]] __a :List[str] = Union[T, List[T], Dict[str, T]] __a :Any = Union[str, bytes, os.PathLike]
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : int ): """simple docstring""" while a != 0: A_ , A_ = b % a, a return b def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : int ): """simple docstring""" if gcd(__UpperCamelCase ,__UpperCamelCase ) != 1: A_ = f'''mod inverse of {a!r} and {m!r} does not exist''' raise ValueError(__UpperCamelCase ) A_ , A_ , A_ = 1, 0, a A_ , A_ , A_ = 0, 1, m while va != 0: A_ = ua // va A_ , A_ , A_ , A_ , A_ , A_ = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va return ua % m
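The two helpers above are Euclid's gcd and an extended-Euclidean modular inverse. A usage sketch assuming the original names `gcd` and `find_mod_inverse` (reconstructed from the internal call site `gcd(...)` and the error message; under the dump's renaming both defs collapse to `__snake_case`):

# gcd and find_mod_inverse are the assumed original names of the two functions above.
print(gcd(48, 18))  # 6

inverse = find_mod_inverse(7, 26)
print(inverse)                 # 15, since 7 * 15 == 105 == 1 (mod 26)
assert (7 * inverse) % 26 == 1

# Raises ValueError: gcd(4, 8) != 1, so no inverse exists.
# find_mod_inverse(4, 8)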
__a :Dict = '0.18.2' from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, 
LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, 
FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig __a :Any = { 'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json', 'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json', } class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Dict = 'ernie_m' _lowerCamelCase : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__( self : List[str] , UpperCAmelCase : int = 250002 , UpperCAmelCase : int = 768 , UpperCAmelCase : int = 12 , UpperCAmelCase : int = 12 , UpperCAmelCase : int = 3072 , UpperCAmelCase : str = "gelu" , UpperCAmelCase : float = 0.1 , UpperCAmelCase : float = 0.1 , UpperCAmelCase : int = 514 , UpperCAmelCase : float = 0.02 , UpperCAmelCase : int = 1 , UpperCAmelCase : float = 1E-05 , UpperCAmelCase : Dict=None , UpperCAmelCase : Any=False , UpperCAmelCase : Tuple=0.0 , **UpperCAmelCase : str , ): super().__init__(pad_token_id=UpperCAmelCase , **UpperCAmelCase ) A_ = vocab_size A_ = hidden_size A_ = num_hidden_layers A_ = num_attention_heads A_ = intermediate_size A_ = hidden_act A_ = hidden_dropout_prob A_ = attention_probs_dropout_prob A_ = max_position_embeddings A_ = initializer_range A_ = layer_norm_eps A_ = classifier_dropout A_ = is_decoder A_ = act_dropout
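Given the `model_type = 'ernie_m'` marker, the class above corresponds to transformers' public `ErnieMConfig`; a minimal instantiation sketch under that assumption:

from transformers import ErnieMConfig  # assumed public name of the class above

# Override a couple of the defaults shown in the signature above.
config = ErnieMConfig(num_hidden_layers=6, hidden_size=384, num_attention_heads=6)
print(config.model_type)  # "ernie_m"
print(config.vocab_size)  # 250002 (default)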
def __snake_case ( __UpperCamelCase : int = 1000 ): """simple docstring""" return sum(e for e in range(3 ,__UpperCamelCase ) if e % 3 == 0 or e % 5 == 0 ) if __name__ == "__main__": print(F"{solution() = }")
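For reference, the brute-force scan above (Project Euler problem 1) agrees with a constant-time inclusion-exclusion form; a hedged alternative sketch:

def solution_closed_form(n: int = 1000) -> int:
    """Sum of multiples of 3 or 5 below n, via inclusion-exclusion."""

    def multiples_sum(k: int) -> int:
        # k + 2k + ... + mk where m = (n - 1) // k
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return multiples_sum(3) + multiples_sum(5) - multiples_sum(15)


assert solution_closed_form(1000) == 233168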
import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class _a ( unittest.TestCase ): """simple docstring""" @property def __A ( self : Tuple ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __A ( self : List[str] ): A_ = ort.SessionOptions() A_ = False return options def __A ( self : Dict ): A_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) A_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) A_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy" ) # using the PNDM scheduler by default A_ = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=UpperCAmelCase , feature_extractor=UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) A_ = "A red cat sitting on a park bench" A_ = np.random.RandomState(0 ) A_ = pipe( prompt=UpperCAmelCase , image=UpperCAmelCase , mask_image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=UpperCAmelCase , output_type="np" , ) A_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1E-2
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class _a : """simple docstring""" @property def __A ( self : Union[str, Any] ): return self.get_dummy_input() @property def __A ( self : int ): if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' ) def __A ( self : Union[str, Any] , UpperCAmelCase : List[Any]=True , UpperCAmelCase : str=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[Any]=False , ): A_ = 4 A_ = 32 A_ = (32, 32) A_ = torch.manual_seed(0 ) A_ = torch.device(UpperCAmelCase ) A_ = (batch_size, num_channels) + sizes A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ) A_ = {"hidden_states": hidden_states} if include_temb: A_ = 128 A_ = randn_tensor((batch_size, temb_channels) , generator=UpperCAmelCase , device=UpperCAmelCase ) if include_res_hidden_states_tuple: A_ = torch.manual_seed(1 ) A_ = (randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ),) if include_encoder_hidden_states: A_ = floats_tensor((batch_size, 32, 32) ).to(UpperCAmelCase ) if include_skip_sample: A_ = randn_tensor(((batch_size, 3) + sizes) , generator=UpperCAmelCase , device=UpperCAmelCase ) return dummy_input def __A ( self : Optional[int] ): A_ = { "in_channels": 32, "out_channels": 32, "temb_channels": 128, } if self.block_type == "up": A_ = 32 if self.block_type == "mid": init_dict.pop("out_channels" ) A_ = self.dummy_input return init_dict, inputs_dict def __A ( self : List[str] , UpperCAmelCase : Optional[Any] ): A_ , A_ = self.prepare_init_args_and_inputs_for_common() A_ = self.block_class(**UpperCAmelCase ) unet_block.to(UpperCAmelCase ) unet_block.eval() with torch.no_grad(): A_ = unet_block(**UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = output[0] self.assertEqual(output.shape , self.output_shape ) A_ = output[0, -1, -3:, -3:] A_ = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase ) assert torch_all_close(output_slice.flatten() , UpperCAmelCase , atol=5E-3 ) @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" ) def __A ( self : Union[str, Any] ): A_ , A_ = self.prepare_init_args_and_inputs_for_common() A_ = self.block_class(**UpperCAmelCase ) model.to(UpperCAmelCase ) model.train() A_ = model(**UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = output[0] A_ = torch.device(UpperCAmelCase ) A_ = randn_tensor(output.shape , device=UpperCAmelCase ) A_ = torch.nn.functional.mse_loss(UpperCAmelCase , UpperCAmelCase ) loss.backward()
__a :dict[str, float] = { "joule": 1.0, "kilojoule": 1000, "megajoule": 100_0000, "gigajoule": 10_0000_0000, "wattsecond": 1.0, "watthour": 3600, "kilowatthour": 360_0000, "newtonmeter": 1.0, "calorie_nutr": 4186.8, "kilocalorie_nutr": 418_6800.00, "electronvolt": 1.602_176_634e-19, "britishthermalunit_it": 1055.0_5585, "footpound": 1.35_5818, } def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : float ): """simple docstring""" if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: A_ = ( f'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n''' f'''Valid values are: {", ".join(__UpperCamelCase )}''' ) raise ValueError(__UpperCamelCase ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
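A usage sketch for the converter above; `energy_conversion` is the assumed original name of the `__snake_case` function (the error strings reference `from_type`/`to_type`):

# energy_conversion is the assumed original name of the function above.
print(energy_conversion("joule", "kilojoule", 5000))      # 5.0
print(energy_conversion("kilowatthour", "joule", 1))      # 3600000.0
print(energy_conversion("kilocalorie_nutr", "joule", 1))  # 4186800.0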
import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch __a :int = True except ImportError: __a :Optional[Any] = False try: from torch.hub import _get_torch_home __a :Optional[Any] = _get_torch_home() except ImportError: __a :Tuple = os.path.expanduser( os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')) ) __a :Optional[Any] = os.path.join(torch_cache_home, 'transformers') __a :int = 'https://cdn.huggingface.co' __a :Any = 'https://s3.amazonaws.com/models.huggingface.co/bert' __a :Optional[Any] = '/'.join(str(Path(__file__).resolve()).split('/')[:-1]) __a :str = os.path.join(PATH, 'config.yaml') __a :str = os.path.join(PATH, 'attributes.txt') __a :Optional[Any] = os.path.join(PATH, 'objects.txt') __a :Optional[int] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path) __a :Dict = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE) __a :List[Any] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE) __a :List[str] = 'pytorch_model.bin' __a :Tuple = 'config.yaml' def __snake_case ( __UpperCamelCase : Optional[Any]=OBJECTS ,__UpperCamelCase : List[str]=ATTRIBUTES ): """simple docstring""" A_ = [] with open(__UpperCamelCase ) as f: for object in f.readlines(): vg_classes.append(object.split("," )[0].lower().strip() ) A_ = [] with open(__UpperCamelCase ) as f: for object in f.readlines(): vg_attrs.append(object.split("," )[0].lower().strip() ) return vg_classes, vg_attrs def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" A_ = OrderedDict() with open(__UpperCamelCase ,"rb" ) as f: A_ = pkl.load(__UpperCamelCase )["model"] for k in copy.deepcopy(list(ckp.keys() ) ): A_ = ckp.pop(__UpperCamelCase ) if isinstance(__UpperCamelCase ,np.ndarray ): A_ = torch.tensor(__UpperCamelCase ) else: assert isinstance(__UpperCamelCase ,torch.tensor ), type(__UpperCamelCase ) A_ = v return r class _a : """simple docstring""" _lowerCamelCase : Union[str, Any] = {} def __init__( self : str , UpperCAmelCase : dict , UpperCAmelCase : str = "root" , UpperCAmelCase : List[str]=0 ): A_ = name A_ = level A_ = {} for k, v in dictionary.items(): if v is None: raise ValueError() A_ = copy.deepcopy(UpperCAmelCase ) A_ = copy.deepcopy(UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = Config(UpperCAmelCase , name=UpperCAmelCase , level=level + 1 ) A_ = v setattr(self , UpperCAmelCase , UpperCAmelCase ) A_ = d def __repr__( self : Optional[Any] ): return str(list((self._pointer.keys()) ) ) def __setattr__( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Any ): A_ = val A_ = val A_ = key.split("." 
) A_ = len(UpperCAmelCase ) - 1 A_ = self._pointer if len(UpperCAmelCase ) > 1: for i, l in enumerate(UpperCAmelCase ): if hasattr(self , UpperCAmelCase ) and isinstance(getattr(self , UpperCAmelCase ) , UpperCAmelCase ): setattr(getattr(self , UpperCAmelCase ) , ".".join(levels[i:] ) , UpperCAmelCase ) if l == last_level: A_ = val else: A_ = pointer[l] def __A ( self : List[str] ): return self._pointer def __A ( self : int , UpperCAmelCase : Tuple , UpperCAmelCase : int ): with open(f'''{file_name}''' , "w" ) as stream: dump(UpperCAmelCase , UpperCAmelCase ) def __A ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Tuple ): with open(f'''{file_name}''' , "w" ) as stream: json.dump(UpperCAmelCase , UpperCAmelCase ) @staticmethod def __A ( UpperCAmelCase : Optional[int] ): with open(UpperCAmelCase ) as stream: A_ = load(UpperCAmelCase , Loader=UpperCAmelCase ) return data def __str__( self : str ): A_ = " " if self._name != "root": A_ = f'''{t * (self._level-1)}{self._name}:\n''' else: A_ = "" A_ = self._level for i, (k, v) in enumerate(self._pointer.items() ): if isinstance(UpperCAmelCase , UpperCAmelCase ): r += f'''{t * (self._level)}{v}\n''' self._level += 1 else: r += f'''{t * (self._level)}{k}: {v} ({type(UpperCAmelCase ).__name__})\n''' A_ = level return r[:-1] @classmethod def __A ( cls : Optional[Any] , UpperCAmelCase : str , **UpperCAmelCase : str ): A_ , A_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase ) return cls(UpperCAmelCase ) @classmethod def __A ( cls : int , UpperCAmelCase : str , **UpperCAmelCase : int ): A_ = kwargs.pop("cache_dir" , UpperCAmelCase ) A_ = kwargs.pop("force_download" , UpperCAmelCase ) A_ = kwargs.pop("resume_download" , UpperCAmelCase ) A_ = kwargs.pop("proxies" , UpperCAmelCase ) A_ = kwargs.pop("local_files_only" , UpperCAmelCase ) if os.path.isdir(UpperCAmelCase ): A_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) elif os.path.isfile(UpperCAmelCase ) or is_remote_url(UpperCAmelCase ): A_ = pretrained_model_name_or_path else: A_ = hf_bucket_url(UpperCAmelCase , filename=UpperCAmelCase , use_cdn=UpperCAmelCase ) try: # Load from URL or cache if already cached A_ = cached_path( UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , proxies=UpperCAmelCase , resume_download=UpperCAmelCase , local_files_only=UpperCAmelCase , ) # Load config dict if resolved_config_file is None: raise EnvironmentError A_ = Config.load_yaml(UpperCAmelCase ) except EnvironmentError: A_ = "Can't load config for" raise EnvironmentError(UpperCAmelCase ) if resolved_config_file == config_file: print("loading configuration file from path" ) else: print("loading configuration file cache" ) return Config.load_yaml(UpperCAmelCase ), kwargs def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" A_ = torch.load("dump.pt" ,map_location=in_tensor.device ) A_ = in_tensor.numpy() A_ = out_tensor.numpy()[0] print(na.shape ,na[0, 0, :5] ) print(na.shape ,na[0, 0, :5] ) assert np.allclose(__UpperCamelCase ,__UpperCamelCase ,rtol=0.01 ,atol=0.1 ), ( f'''{sum([1 for x in np.isclose(__UpperCamelCase ,__UpperCamelCase ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %''' " element-wise mismatch" ) raise Exception("tensors are all good" ) # Hugging face functions below def __snake_case ( __UpperCamelCase : Optional[int] ): """simple docstring""" A_ = urlparse(__UpperCamelCase ) return parsed.scheme in ("http", "https") def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase 
: str=True ): """simple docstring""" A_ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX A_ = "/" not in model_id if legacy_format: return f'''{endpoint}/{model_id}-{filename}''' else: return f'''{endpoint}/{model_id}/{filename}''' def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str]=None ,__UpperCamelCase : int=0 ,__UpperCamelCase : int=None ,): """simple docstring""" A_ = "python/{}".format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(__UpperCamelCase ,__UpperCamelCase ): ua += "; " + "; ".join("{}/{}".format(__UpperCamelCase ,__UpperCamelCase ) for k, v in user_agent.items() ) elif isinstance(__UpperCamelCase ,__UpperCamelCase ): ua += "; " + user_agent A_ = {"user-agent": ua} if resume_size > 0: A_ = "bytes=%d-" % (resume_size,) A_ = requests.get(__UpperCamelCase ,stream=__UpperCamelCase ,proxies=__UpperCamelCase ,headers=__UpperCamelCase ) if response.status_code == 416: # Range not satisfiable return A_ = response.headers.get("Content-Length" ) A_ = resume_size + int(__UpperCamelCase ) if content_length is not None else None A_ = tqdm( unit="B" ,unit_scale=__UpperCamelCase ,total=__UpperCamelCase ,initial=__UpperCamelCase ,desc="Downloading" ,) for chunk in response.iter_content(chunk_size=1024 ): if chunk: # filter out keep-alive new chunks progress.update(len(__UpperCamelCase ) ) temp_file.write(__UpperCamelCase ) progress.close() def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Any=None ,__UpperCamelCase : Dict=False ,__UpperCamelCase : Union[str, Any]=None ,__UpperCamelCase : Any=10 ,__UpperCamelCase : int=False ,__UpperCamelCase : Optional[Any]=None ,__UpperCamelCase : str=False ,): """simple docstring""" if cache_dir is None: A_ = TRANSFORMERS_CACHE if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = str(__UpperCamelCase ) os.makedirs(__UpperCamelCase ,exist_ok=__UpperCamelCase ) A_ = None if not local_files_only: try: A_ = requests.head(__UpperCamelCase ,allow_redirects=__UpperCamelCase ,proxies=__UpperCamelCase ,timeout=__UpperCamelCase ) if response.status_code == 200: A_ = response.headers.get("ETag" ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass A_ = url_to_filename(__UpperCamelCase ,__UpperCamelCase ) # get cache path to put the file A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(__UpperCamelCase ): return cache_path else: A_ = [ file for file in fnmatch.filter(os.listdir(__UpperCamelCase ) ,filename + ".*" ) if not file.endswith(".json" ) and not file.endswith(".lock" ) ] if len(__UpperCamelCase ) > 0: return os.path.join(__UpperCamelCase ,matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( "Cannot find the requested files in the cached path and outgoing traffic has been" " disabled. To enable model look-ups and downloads online, set 'local_files_only'" " to False." ) return None # From now on, etag is not None. if os.path.exists(__UpperCamelCase ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. 
A_ = cache_path + ".lock" with FileLock(__UpperCamelCase ): # If the download just completed while the lock was activated. if os.path.exists(__UpperCamelCase ) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: A_ = cache_path + ".incomplete" @contextmanager def _resumable_file_manager(): with open(__UpperCamelCase ,"a+b" ) as f: yield f A_ = _resumable_file_manager if os.path.exists(__UpperCamelCase ): A_ = os.stat(__UpperCamelCase ).st_size else: A_ = 0 else: A_ = partial(tempfile.NamedTemporaryFile ,dir=__UpperCamelCase ,delete=__UpperCamelCase ) A_ = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: print( "%s not found in cache or force_download set to True, downloading to %s" ,__UpperCamelCase ,temp_file.name ,) http_get( __UpperCamelCase ,__UpperCamelCase ,proxies=__UpperCamelCase ,resume_size=__UpperCamelCase ,user_agent=__UpperCamelCase ,) os.replace(temp_file.name ,__UpperCamelCase ) A_ = {"url": url, "etag": etag} A_ = cache_path + ".json" with open(__UpperCamelCase ,"w" ) as meta_file: json.dump(__UpperCamelCase ,__UpperCamelCase ) return cache_path def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : str=None ): """simple docstring""" A_ = url.encode("utf-8" ) A_ = shaaaa(__UpperCamelCase ) A_ = url_hash.hexdigest() if etag: A_ = etag.encode("utf-8" ) A_ = shaaaa(__UpperCamelCase ) filename += "." + etag_hash.hexdigest() if url.endswith(".h5" ): filename += ".h5" return filename def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Union[str, Any]=None ,__UpperCamelCase : List[Any]=False ,__UpperCamelCase : List[str]=None ,__UpperCamelCase : Any=False ,__UpperCamelCase : Optional[int]=None ,__UpperCamelCase : Optional[Any]=False ,__UpperCamelCase : Dict=False ,__UpperCamelCase : Optional[Any]=False ,): """simple docstring""" if cache_dir is None: A_ = TRANSFORMERS_CACHE if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = str(__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = str(__UpperCamelCase ) if is_remote_url(__UpperCamelCase ): # URL, so get it from the cache (downloading if necessary) A_ = get_from_cache( __UpperCamelCase ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,user_agent=__UpperCamelCase ,local_files_only=__UpperCamelCase ,) elif os.path.exists(__UpperCamelCase ): # File, and it exists. A_ = url_or_filename elif urlparse(__UpperCamelCase ).scheme == "": # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(__UpperCamelCase ) ) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(__UpperCamelCase ) ) if extract_compressed_file: if not is_zipfile(__UpperCamelCase ) and not tarfile.is_tarfile(__UpperCamelCase ): return output_path # Path where we extract compressed archives # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" A_ , A_ = os.path.split(__UpperCamelCase ) A_ = output_file.replace("." 
,"-" ) + "-extracted" A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ) if os.path.isdir(__UpperCamelCase ) and os.listdir(__UpperCamelCase ) and not force_extract: return output_path_extracted # Prevent parallel extractions A_ = output_path + ".lock" with FileLock(__UpperCamelCase ): shutil.rmtree(__UpperCamelCase ,ignore_errors=__UpperCamelCase ) os.makedirs(__UpperCamelCase ) if is_zipfile(__UpperCamelCase ): with ZipFile(__UpperCamelCase ,"r" ) as zip_file: zip_file.extractall(__UpperCamelCase ) zip_file.close() elif tarfile.is_tarfile(__UpperCamelCase ): A_ = tarfile.open(__UpperCamelCase ) tar_file.extractall(__UpperCamelCase ) tar_file.close() else: raise EnvironmentError("Archive format of {} could not be identified".format(__UpperCamelCase ) ) return output_path_extracted return output_path def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Any="," ): """simple docstring""" assert isinstance(__UpperCamelCase ,__UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): with open(__UpperCamelCase ) as f: A_ = eval(f.read() ) else: A_ = requests.get(__UpperCamelCase ) try: A_ = requests.json() except Exception: A_ = req.content.decode() assert data is not None, "could not connect" try: A_ = eval(__UpperCamelCase ) except Exception: A_ = data.split("\n" ) req.close() return data def __snake_case ( __UpperCamelCase : int ): """simple docstring""" A_ = requests.get(__UpperCamelCase ) A_ = np.array(Image.open(BytesIO(response.content ) ) ) return img def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" A_ = url.split("/" )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(__UpperCamelCase ) with open(__UpperCamelCase ,"rb" ) as stream: A_ = pkl.load(__UpperCamelCase ) A_ = weights.pop("model" ) A_ = {} for k, v in model.items(): A_ = torch.from_numpy(__UpperCamelCase ) if "running_var" in k: A_ = torch.tensor([0] ) A_ = k.replace("running_var" ,"num_batches_tracked" ) A_ = zero return new def __snake_case ( ): """simple docstring""" print(f'''{os.path.abspath(os.path.join(__UpperCamelCase ,os.pardir ) )}/demo.ipynb''' ) def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[int]="RGB" ): """simple docstring""" assert isinstance(__UpperCamelCase ,__UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): A_ = cva.imread(__UpperCamelCase ) else: A_ = get_image_from_url(__UpperCamelCase ) assert img is not None, f'''could not connect to: {im}''' A_ = cva.cvtColor(__UpperCamelCase ,cva.COLOR_BGR2RGB ) if input_format == "RGB": A_ = img[:, :, ::-1] return img def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[str]=1 ): """simple docstring""" return (images[i : i + batch] for i in range(0 ,len(__UpperCamelCase ) ,__UpperCamelCase ))
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging __a :Any = logging.get_logger(__name__) class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : int = ['pixel_values'] def __init__( self : Any , UpperCAmelCase : bool = True , UpperCAmelCase : Union[int, float] = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : int = 8 , **UpperCAmelCase : List[str] , ): super().__init__(**UpperCAmelCase ) A_ = do_rescale A_ = rescale_factor A_ = do_pad A_ = pad_size def __A ( self : Optional[int] , UpperCAmelCase : np.ndarray , UpperCAmelCase : float , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[int] ): return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def __A ( self : List[str] , UpperCAmelCase : np.ndarray , UpperCAmelCase : int , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None ): A_ , A_ = get_image_size(UpperCAmelCase ) A_ = (old_height // size + 1) * size - old_height A_ = (old_width // size + 1) * size - old_width return pad(UpperCAmelCase , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=UpperCAmelCase ) def __A ( self : Tuple , UpperCAmelCase : ImageInput , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[float] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase : Optional[Any] , ): A_ = do_rescale if do_rescale is not None else self.do_rescale A_ = rescale_factor if rescale_factor is not None else self.rescale_factor A_ = do_pad if do_pad is not None else self.do_pad A_ = pad_size if pad_size is not None else self.pad_size A_ = make_list_of_images(UpperCAmelCase ) if not valid_images(UpperCAmelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) # All transformations expect numpy arrays. A_ = [to_numpy_array(UpperCAmelCase ) for image in images] if do_rescale: A_ = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images] if do_pad: A_ = [self.pad(UpperCAmelCase , size=UpperCAmelCase ) for image in images] A_ = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images] A_ = {"pixel_values": images} return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
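The processor above mirrors transformers' Swin2SRImageProcessor: rescale by 1/255, then symmetric-pad each spatial dimension up past the next multiple of `pad_size`. A sketch assuming that public class name:

import numpy as np

from transformers import Swin2SRImageProcessor  # assumed identity of the class above

processor = Swin2SRImageProcessor(do_rescale=True, do_pad=True, pad_size=8)
image = np.random.randint(0, 256, (3, 250, 301), dtype=np.uint8)  # channels-first

batch = processor(image, return_tensors="np")
# 250 -> 256 and 301 -> 304 under the (dim // 8 + 1) * 8 padding rule above.
print(batch["pixel_values"].shape)  # expected (1, 3, 256, 304)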
from __future__ import annotations def __snake_case ( __UpperCamelCase : list[list[int]] ): """simple docstring""" for i in range(1 ,len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 ,len(__UpperCamelCase ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 ,len(__UpperCamelCase ) ): for j in range(1 ,len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] ,matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
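A usage sketch for the dynamic-programming helper above, which mutates the grid in place and returns the cheapest top-left-to-bottom-right path cost with right/down moves only; `min_path_sum` is a hypothetical stand-in for the obfuscated `__snake_case`:

# min_path_sum is a hypothetical name for the function above (note: it mutates `grid`).
grid = [
    [1, 3, 1],
    [1, 5, 1],
    [4, 2, 1],
]
print(min_path_sum(grid))  # 7, via the path 1 -> 3 -> 1 -> 1 -> 1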
import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) __a :int = getLogger(__name__) def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : int = 8 ,__UpperCamelCase : int = 1024 ,__UpperCamelCase : str="val" ,__UpperCamelCase : Optional[int]=None ,__UpperCamelCase : Any=False ,__UpperCamelCase : List[str]="summarization" ,__UpperCamelCase : Tuple=None ,__UpperCamelCase : Union[str, Any]=1 ,__UpperCamelCase : Dict = None ,__UpperCamelCase : List[str]="" ,**__UpperCamelCase : Union[str, Any] ,): """simple docstring""" A_ = str(__UpperCamelCase ) assert local_rank is not None torch.distributed.init_process_group(backend="nccl" ,rank=__UpperCamelCase ) A_ = Path(__UpperCamelCase ) A_ = save_dir.joinpath(f'''rank_{local_rank}_output.json''' ) torch.cuda.set_device(__UpperCamelCase ) A_ = AutoModelForSeqaSeqLM.from_pretrained(__UpperCamelCase ).cuda() if fpaa: A_ = model.half() # determine if we need to increase num_beams use_task_specific_params(__UpperCamelCase ,__UpperCamelCase ) # update config with task specific params A_ = generate_kwargs.pop("num_beams" ,model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: A_ = num_return_sequences A_ = AutoTokenizer.from_pretrained(__UpperCamelCase ) logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. if max_source_length is None: A_ = tokenizer.model_max_length if prefix is None: A_ = prefix or getattr(model.config ,"prefix" ,"" ) or "" A_ = SeqaSeqDataset( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,max_target_length=1024 ,type_path=__UpperCamelCase ,n_obs=__UpperCamelCase ,prefix=__UpperCamelCase ,**__UpperCamelCase ,) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. 
A_ = ds.make_sortish_sampler(__UpperCamelCase ,distributed=__UpperCamelCase ,add_extra_examples=__UpperCamelCase ,shuffle=__UpperCamelCase ) A_ = DataLoader(__UpperCamelCase ,sampler=__UpperCamelCase ,batch_size=__UpperCamelCase ,collate_fn=ds.collate_fn ) A_ = [] for batch in tqdm(__UpperCamelCase ): A_ = model.generate( input_ids=batch["input_ids"].to(model.device ) ,attention_mask=batch["attention_mask"].to(model.device ) ,num_return_sequences=__UpperCamelCase ,num_beams=__UpperCamelCase ,**__UpperCamelCase ,) A_ = tokenizer.batch_decode(__UpperCamelCase ,skip_special_tokens=__UpperCamelCase ,clean_up_tokenization_spaces=__UpperCamelCase ) A_ = batch["ids"] if num_return_sequences > 1: A_ = chunks(__UpperCamelCase ,__UpperCamelCase ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(__UpperCamelCase ): results.append({"pred": pred, "id": ids[i].item()} ) save_json(__UpperCamelCase ,__UpperCamelCase ) return results, sampler.num_replicas def __snake_case ( ): """simple docstring""" A_ = argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir" ,type=__UpperCamelCase ,help="like cnn_dm/test.source" ) parser.add_argument( "--model_name" ,type=__UpperCamelCase ,help="like facebook/bart-large-cnn,t5-base, etc." ,default="sshleifer/distilbart-xsum-12-3" ,) parser.add_argument("--save_dir" ,type=__UpperCamelCase ,help="where to save" ,default="tmp_gen" ) parser.add_argument("--max_source_length" ,type=__UpperCamelCase ,default=__UpperCamelCase ) parser.add_argument( "--type_path" ,type=__UpperCamelCase ,default="test" ,help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task" ,type=__UpperCamelCase ,default="summarization" ,help="used for task_specific_params + metrics" ) parser.add_argument("--bs" ,type=__UpperCamelCase ,default=8 ,required=__UpperCamelCase ,help="batch size" ) parser.add_argument( "--local_rank" ,type=__UpperCamelCase ,default=-1 ,required=__UpperCamelCase ,help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs" ,type=__UpperCamelCase ,default=__UpperCamelCase ,required=__UpperCamelCase ,help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences" ,type=__UpperCamelCase ,default=1 ,required=__UpperCamelCase ,help="How many sequences to return" ) parser.add_argument( "--sync_timeout" ,type=__UpperCamelCase ,default=600 ,required=__UpperCamelCase ,help="How long should master process wait for other processes to finish." ,) parser.add_argument("--src_lang" ,type=__UpperCamelCase ,default=__UpperCamelCase ,required=__UpperCamelCase ) parser.add_argument("--tgt_lang" ,type=__UpperCamelCase ,default=__UpperCamelCase ,required=__UpperCamelCase ) parser.add_argument( "--prefix" ,type=__UpperCamelCase ,required=__UpperCamelCase ,default=__UpperCamelCase ,help="will be added to the begininng of src examples" ) parser.add_argument("--fp16" ,action="store_true" ) parser.add_argument("--debug" ,action="store_true" ) A_ = time.time() A_ , A_ = parser.parse_known_args() A_ = parse_numeric_n_bool_cl_kwargs(__UpperCamelCase ) if generate_kwargs and args.local_rank <= 0: print(f'''parsed the following generate kwargs: {generate_kwargs}''' ) A_ = Path(args.save_dir + "_tmp" ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) # this handles locking. 
A_ = list(json_save_dir.glob("rank_*.json" ) ) if intermediate_files: raise ValueError(f'''Found files at {json_save_dir} please move or remove them.''' ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. A_ = {} if args.src_lang is not None: A_ = args.src_lang if args.tgt_lang is not None: A_ = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=__UpperCamelCase ) A_ , A_ = eval_data_dir( args.data_dir ,__UpperCamelCase ,args.model_name ,type_path=args.type_path ,bs=args.bs ,fpaa=args.fpaa ,task=args.task ,local_rank=args.local_rank ,n_obs=args.n_obs ,max_source_length=args.max_source_length ,num_return_sequences=args.num_return_sequences ,prefix=args.prefix ,dataset_kwargs=__UpperCamelCase ,**__UpperCamelCase ,) if args.local_rank <= 0: A_ = Path(args.save_dir ) save_dir.mkdir(exist_ok=__UpperCamelCase ) A_ = gather_results_from_each_node(__UpperCamelCase ,__UpperCamelCase ,args.sync_timeout ) A_ = combine_partial_results(__UpperCamelCase ) if args.num_return_sequences > 1: A_ = save_dir.joinpath("pseudolabel_results.json" ) print(f'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' ) save_json(__UpperCamelCase ,__UpperCamelCase ) return A_ = Path(args.data_dir ).joinpath(args.type_path + ".target" ) with open(__UpperCamelCase ) as f: A_ = [x.rstrip() for x in f.readlines()][: len(__UpperCamelCase )] # Calculate metrics, save metrics, and save _generations.txt A_ = "translation" in args.task A_ = calculate_bleu if calc_bleu else calculate_rouge A_ = "bleu" if calc_bleu else "rouge" A_ = score_fn(__UpperCamelCase ,__UpperCamelCase ) A_ = len(__UpperCamelCase ) A_ = time.time() - start_time A_ = round(runtime / metrics["n_obs"] ,4 ) A_ = num_replicas # TODO(@stas00): add whatever metadata to metrics A_ = save_dir.joinpath(f'''{args.type_path}_{metric_name}.json''' ) save_json(__UpperCamelCase ,__UpperCamelCase ,indent=__UpperCamelCase ) print(__UpperCamelCase ) write_txt_file(__UpperCamelCase ,save_dir.joinpath(f'''{args.type_path}_generations.txt''' ) ) if args.debug: write_txt_file(__UpperCamelCase ,save_dir.joinpath(f'''{args.type_path}.target''' ) ) else: shutil.rmtree(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" A_ = [] for partial_result in partial_results: records.extend(__UpperCamelCase ) A_ = sorted(__UpperCamelCase ,key=lambda __UpperCamelCase : x["id"] ) A_ = [x["pred"] for x in records] return preds def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : int ): """simple docstring""" A_ = time.time() logger.info("waiting for all nodes to finish" ) A_ = None while (time.time() - start_wait) < timeout: A_ = list(save_dir.glob("rank_*.json" ) ) if len(__UpperCamelCase ) < num_replicas: continue try: # make sure all json files are fully saved A_ = lmap(__UpperCamelCase ,__UpperCamelCase ) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging __a :int = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class _a ( snake_case_ ): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : int = 101 ): A_ = length def __len__( self : int ): return self.length def __getitem__( self : Optional[int] , UpperCAmelCase : Optional[int] ): return i class _a : """simple docstring""" def __call__( self : Any , UpperCAmelCase : Optional[Any] ): return {"input_ids": torch.tensor(UpperCAmelCase ), "labels": torch.tensor(UpperCAmelCase )} class _a ( nn.Module ): """simple docstring""" def __init__( self : int ): super().__init__() # Add some (unused) params otherwise DDP will complain. A_ = nn.Linear(120 , 80 ) def __A ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Tuple=None ): if labels is not None: return torch.tensor(0.0 , device=input_ids.device ), input_ids else: return input_ids class _a ( snake_case_ ): """simple docstring""" @require_torch_neuroncore def __A ( self : List[str] ): A_ = f'''--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py '''.split() A_ = self.get_auto_remove_tmp_dir() A_ = f'''--output_dir {output_dir}'''.split() A_ = ["torchrun"] + distributed_args + args execute_subprocess_async(UpperCAmelCase , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call class _a ( snake_case_ ): """simple docstring""" @require_torch_multi_gpu def __A ( self : List[str] ): A_ = f'''--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py '''.split() A_ = self.get_auto_remove_tmp_dir() A_ = f'''--output_dir {output_dir}'''.split() A_ = ["torchrun"] + distributed_args + args execute_subprocess_async(UpperCAmelCase , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py __a :Union[str, Any] = HfArgumentParser((TrainingArguments,)) __a :Tuple = parser.parse_args_into_dataclasses()[0] logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " F"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}" ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. 
(this is crucial for prediction for instance) for dataset_length in [101, 40, 7]: __a :int = DummyDataset(dataset_length) def __snake_case ( __UpperCamelCase : EvalPrediction ): """simple docstring""" A_ = list(range(len(__UpperCamelCase ) ) ) A_ = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( "Predictions and/or labels do not match expected results:\n - predictions: " f'''{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}''' ) return {"success": success} __a :str = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) __a :str = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) __a :str = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) __a :Optional[int] = 2 __a :List[Any] = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) __a :str = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) __a :Union[str, Any] = None
def __snake_case ( __UpperCamelCase : dict ): """simple docstring""" A_ = set() # edges = list of graph's edges A_ = get_edges(__UpperCamelCase ) # While there are still elements in edges list, take an arbitrary edge # (from_node, to_node) and add his extremity to chosen_vertices and then # remove all arcs adjacent to the from_node and to_node while edges: A_ , A_ = edges.pop() chosen_vertices.add(__UpperCamelCase ) chosen_vertices.add(__UpperCamelCase ) for edge in edges.copy(): if from_node in edge or to_node in edge: edges.discard(__UpperCamelCase ) return chosen_vertices def __snake_case ( __UpperCamelCase : dict ): """simple docstring""" A_ = set() for from_node, to_nodes in graph.items(): for to_node in to_nodes: edges.add((from_node, to_node) ) return edges if __name__ == "__main__": import doctest doctest.testmod() # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
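A runnable version of the commented-out demo at the bottom of the file, assuming the original name `matching_min_vertex_cover` (under the dump's renaming, `get_edges` is shadowed and the call would not resolve as written):

# matching_min_vertex_cover is the assumed original name of the first function above.
graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
cover = matching_min_vertex_cover(graph)

# The matching-based heuristic is a 2-approximation: every edge is covered.
assert all(u in cover or v in cover for u in graph for v in graph[u])
print(f"Matching vertex cover: {cover}")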
312
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __a :Any = logging.get_logger(__name__) def __snake_case ( __UpperCamelCase : Optional[int] ): """simple docstring""" A_ = "huggingface/label-files" A_ = "imagenet-1k-id2label.json" A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) ) A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} A_ = {v: k for k, v in idalabel.items()} A_ = "std_conv" if "bit" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" A_ = BitConfig( conv_layer=__UpperCamelCase ,num_labels=1000 ,idalabel=__UpperCamelCase ,labelaid=__UpperCamelCase ,) return config def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" if "stem.conv" in name: A_ = name.replace("stem.conv" ,"bit.embedder.convolution" ) if "blocks" in name: A_ = name.replace("blocks" ,"layers" ) if "head.fc" in name: A_ = name.replace("head.fc" ,"classifier.1" ) if name.startswith("norm" ): A_ = "bit." + name if "bit" not in name and "classifier" not in name: A_ = "bit.encoder." + name return name def __snake_case ( ): """simple docstring""" A_ = "http://images.cocodataset.org/val2017/000000039769.jpg" A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Tuple=False ): """simple docstring""" A_ = get_config(__UpperCamelCase ) # load original model from timm A_ = create_model(__UpperCamelCase ,pretrained=__UpperCamelCase ) timm_model.eval() # load state_dict of original model A_ = timm_model.state_dict() for key in state_dict.copy().keys(): A_ = state_dict.pop(__UpperCamelCase ) A_ = val.squeeze() if "head" in key else val # load HuggingFace model A_ = BitForImageClassification(__UpperCamelCase ) model.eval() model.load_state_dict(__UpperCamelCase ) # create image processor A_ = create_transform(**resolve_data_config({} ,model=__UpperCamelCase ) ) A_ = transform.transforms A_ = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } A_ = BitImageProcessor( do_resize=__UpperCamelCase ,size={"shortest_edge": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=__UpperCamelCase ,crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} ,do_normalize=__UpperCamelCase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,) A_ = prepare_img() A_ = transform(__UpperCamelCase ).unsqueeze(0 ) A_ = processor(__UpperCamelCase ,return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(__UpperCamelCase ,__UpperCamelCase ) # verify logits with torch.no_grad(): A_ = model(__UpperCamelCase ) A_ = outputs.logits print("Logits:" ,logits[0, :3] ) print("Predicted class:" 
,model.config.idalabel[logits.argmax(-1 ).item()] ) A_ = timm_model(__UpperCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__UpperCamelCase ,outputs.logits ,atol=1E-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(__UpperCamelCase ) processor.save_pretrained(__UpperCamelCase ) if push_to_hub: print(f'''Pushing model {model_name} and processor to the hub''' ) model.push_to_hub(f'''ybelkada/{model_name}''' ) processor.push_to_hub(f'''ybelkada/{model_name}''' ) if __name__ == "__main__": __a :List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='resnetv2_50x1_bitm', type=str, help='Name of the BiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model to the hub.', ) __a :str = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
312
1
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
312
import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger __a :Dict = get_logger(__name__) __a :Union[str, Any] = Path(__file__).parent / 'model_card_template.md' __a :Tuple = uuida().hex __a :List[Any] = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES __a :Union[str, Any] = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES __a :Tuple = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/' def __snake_case ( __UpperCamelCase : Union[Dict, str, None] = None ): """simple docstring""" A_ = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}''' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'''; torch/{_torch_version}''' if is_flax_available(): ua += f'''; jax/{_jax_version}''' ua += f'''; flax/{_flax_version}''' if is_onnx_available(): ua += f'''; onnxruntime/{_onnxruntime_version}''' # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" ,"" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(__UpperCamelCase ,__UpperCamelCase ): ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items() ) elif isinstance(__UpperCamelCase ,__UpperCamelCase ): ua += "; " + user_agent return ua def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[str] = None ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if token is None: A_ = HfFolder.get_token() if organization is None: A_ = whoami(__UpperCamelCase )["name"] return f'''{username}/{model_id}''' else: return f'''{organization}/{model_id}''' def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Union[str, Any] ): """simple docstring""" if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." 
) if hasattr(__UpperCamelCase ,"local_rank" ) and args.local_rank not in [-1, 0]: return A_ = args.hub_token if hasattr(__UpperCamelCase ,"hub_token" ) else None A_ = get_full_repo_name(__UpperCamelCase ,token=__UpperCamelCase ) A_ = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" ,license="apache-2.0" ,library_name="diffusers" ,tags=[] ,datasets=args.dataset_name ,metrics=[] ,) ,template_path=__UpperCamelCase ,model_name=__UpperCamelCase ,repo_name=__UpperCamelCase ,dataset_name=args.dataset_name if hasattr(__UpperCamelCase ,"dataset_name" ) else None ,learning_rate=args.learning_rate ,train_batch_size=args.train_batch_size ,eval_batch_size=args.eval_batch_size ,gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(__UpperCamelCase ,"gradient_accumulation_steps" ) else None ) ,adam_betaa=args.adam_betaa if hasattr(__UpperCamelCase ,"adam_beta1" ) else None ,adam_betaa=args.adam_betaa if hasattr(__UpperCamelCase ,"adam_beta2" ) else None ,adam_weight_decay=args.adam_weight_decay if hasattr(__UpperCamelCase ,"adam_weight_decay" ) else None ,adam_epsilon=args.adam_epsilon if hasattr(__UpperCamelCase ,"adam_epsilon" ) else None ,lr_scheduler=args.lr_scheduler if hasattr(__UpperCamelCase ,"lr_scheduler" ) else None ,lr_warmup_steps=args.lr_warmup_steps if hasattr(__UpperCamelCase ,"lr_warmup_steps" ) else None ,ema_inv_gamma=args.ema_inv_gamma if hasattr(__UpperCamelCase ,"ema_inv_gamma" ) else None ,ema_power=args.ema_power if hasattr(__UpperCamelCase ,"ema_power" ) else None ,ema_max_decay=args.ema_max_decay if hasattr(__UpperCamelCase ,"ema_max_decay" ) else None ,mixed_precision=args.mixed_precision ,) A_ = os.path.join(args.output_dir ,"README.md" ) model_card.save(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Optional[str] ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if resolved_file is None or commit_hash is not None: return commit_hash A_ = str(Path(__UpperCamelCase ).as_posix() ) A_ = re.search(R"snapshots/([^/]+)/" ,__UpperCamelCase ) if search is None: return None A_ = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(__UpperCamelCase ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. __a :str = os.path.expanduser( os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface')) ) __a :List[Any] = os.path.join(hf_cache_home, 'diffusers') def __snake_case ( __UpperCamelCase : Optional[str] = None ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if new_cache_dir is None: A_ = DIFFUSERS_CACHE if old_cache_dir is None: A_ = old_diffusers_cache A_ = Path(__UpperCamelCase ).expanduser() A_ = Path(__UpperCamelCase ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): A_ = new_cache_dir / old_blob_path.relative_to(__UpperCamelCase ) new_blob_path.parent.mkdir(parents=__UpperCamelCase ,exist_ok=__UpperCamelCase ) os.replace(__UpperCamelCase ,__UpperCamelCase ) try: os.symlink(__UpperCamelCase ,__UpperCamelCase ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. 
If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). __a :Dict = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt') if not os.path.isfile(cache_version_file): __a :Optional[int] = 0 else: with open(cache_version_file) as f: try: __a :Dict = int(f.read()) except ValueError: __a :str = 0 if cache_version < 1: __a :Optional[Any] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( 'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your ' 'existing cached models. This is a one-time operation, you can interrupt it or run it ' 'later by calling `diffusers.utils.hub_utils.move_cache()`.' ) try: move_cache() except Exception as e: __a :Optional[Any] = '\n'.join(traceback.format_tb(e.__traceback__)) logger.error( F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease " 'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole ' 'message and we will do our best to help.' ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, 'w') as f: f.write('1') except Exception: logger.warning( F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure " 'the directory exists and can be written to.' ) def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if variant is not None: A_ = weights_name.split("." ) A_ = splits[:-1] + [variant] + splits[-1:] A_ = ".".join(__UpperCamelCase ) return weights_name def __snake_case ( __UpperCamelCase : Optional[Any] ,*, __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Any ,__UpperCamelCase : Tuple ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : str ,__UpperCamelCase : int ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : int ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Optional[int]=None ,): """simple docstring""" A_ = str(__UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): return pretrained_model_name_or_path elif os.path.isdir(__UpperCamelCase ): if os.path.isfile(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ): # Load from a PyTorch checkpoint A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ): A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) return model_file else: raise EnvironmentError( f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' ) else: # 1. 
First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(__UpperCamelCase ).base_version ) >= version.parse("0.20.0" ) ): try: A_ = hf_hub_download( __UpperCamelCase ,filename=_add_variant(__UpperCamelCase ,__UpperCamelCase ) ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,local_files_only=__UpperCamelCase ,use_auth_token=__UpperCamelCase ,user_agent=__UpperCamelCase ,subfolder=__UpperCamelCase ,revision=revision or commit_hash ,) warnings.warn( f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' ,__UpperCamelCase ,) return model_file except: # noqa: E722 warnings.warn( f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__UpperCamelCase ,__UpperCamelCase )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(__UpperCamelCase ,__UpperCamelCase )}\' so that the correct variant file can be added.''' ,__UpperCamelCase ,) try: # 2. Load model file as usual A_ = hf_hub_download( __UpperCamelCase ,filename=__UpperCamelCase ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,local_files_only=__UpperCamelCase ,use_auth_token=__UpperCamelCase ,user_agent=__UpperCamelCase ,subfolder=__UpperCamelCase ,revision=revision or commit_hash ,) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ''' "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ''' "this model name. 
Check the model page at " f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' ) except EntryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' ) except HTTPError as err: raise EnvironmentError( f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' ) except ValueError: raise EnvironmentError( f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it''' f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a''' f''' directory containing a file named {weights_name} or''' " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ''' "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ''' f'''containing a file named {weights_name}''' )
312
1
import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel __a :Dict = { 'gwf-440k': { 'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt', 'sample_rate': 4_8000, 'sample_size': 6_5536, }, 'jmann-small-190k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt', 'sample_rate': 4_8000, 'sample_size': 6_5536, }, 'jmann-large-580k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt', 'sample_rate': 4_8000, 'sample_size': 13_1072, }, 'maestro-uncond-150k': { 'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt', 'sample_rate': 1_6000, 'sample_size': 6_5536, }, 'unlocked-uncond-250k': { 'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt', 'sample_rate': 1_6000, 'sample_size': 6_5536, }, 'honk-140k': { 'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt', 'sample_rate': 1_6000, 'sample_size': 6_5536, }, } def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ): """simple docstring""" return torch.atana(__UpperCamelCase ,__UpperCamelCase ) / math.pi * 2 def __snake_case ( __UpperCamelCase : Optional[int] ): """simple docstring""" A_ = torch.sin(t * math.pi / 2 ) ** 2 A_ = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(__UpperCamelCase ,__UpperCamelCase ) class _a ( snake_case_ ): """simple docstring""" pass class _a ( nn.Module ): """simple docstring""" def __init__( self : str , UpperCAmelCase : Optional[int] ): super().__init__() A_ = DiffusionAttnUnetaD(UpperCAmelCase , n_attn_layers=4 ) A_ = deepcopy(self.diffusion ) A_ = torch.quasirandom.SobolEngine(1 , scramble=UpperCAmelCase ) def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" A_ = MODELS_MAP[model_name]["url"] os.system(f'''wget {url} ./''' ) return f'''./{model_name}.ckpt''' __a :Dict = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', } __a :Optional[Any] = { '8': 'resnets.0', '9': 'attentions.0', '10': 'resnets.1', '11': 'attentions.1', '12': 'resnets.2', '13': 'attentions.2', } __a :int = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', '8': 'resnets.3', '9': 'attentions.3', '10': 'resnets.4', '11': 'attentions.4', '12': 'resnets.5', '13': 'attentions.5', } __a :Optional[int] = { '0': 'resnets.0', '1': 'resnets.1', '2': 'resnets.2', '4': 'resnets.0', '5': 'resnets.1', '6': 'resnets.2', } __a :List[Any] = { 'skip': 'conv_skip', 'main.0': 'conv_1', 'main.1': 'group_norm_1', 'main.3': 'conv_2', 'main.4': 'group_norm_2', } __a :Union[str, Any] = { 'norm': 'group_norm', 'qkv_proj': ['query', 'key', 'value'], 'out_proj': ['proj_attn'], } def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" if name.startswith("skip" ): return name.replace("skip" ,RES_CONV_MAP["skip"] ) # name has to be of format main.{digit} if not name.startswith("main." 
): raise ValueError(f'''ResConvBlock error with {name}''' ) return name.replace(name[:6] ,RES_CONV_MAP[name[:6]] ) def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" for key, value in ATTN_MAP.items(): if name.startswith(__UpperCamelCase ) and not isinstance(__UpperCamelCase ,__UpperCamelCase ): return name.replace(__UpperCamelCase ,__UpperCamelCase ) elif name.startswith(__UpperCamelCase ): return [name.replace(__UpperCamelCase ,__UpperCamelCase ) for v in value] raise ValueError(f'''Attn error with {name}''' ) def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[int]=13 ): """simple docstring""" A_ = input_string if string.split("." )[0] == "timestep_embed": return string.replace("timestep_embed" ,"time_proj" ) A_ = 0 if string.startswith("net.3." ): depth += 1 A_ = string[6:] elif string.startswith("net." ): A_ = string[4:] while string.startswith("main.7." ): depth += 1 A_ = string[7:] if string.startswith("main." ): A_ = string[5:] # mid block if string[:2].isdigit(): A_ = string[:2] A_ = string[2:] else: A_ = string[0] A_ = string[1:] if depth == max_depth: A_ = MID_NUM_TO_LAYER[layer_num] A_ = "mid_block" elif depth > 0 and int(__UpperCamelCase ) < 7: A_ = DOWN_NUM_TO_LAYER[layer_num] A_ = f'''down_blocks.{depth}''' elif depth > 0 and int(__UpperCamelCase ) > 7: A_ = UP_NUM_TO_LAYER[layer_num] A_ = f'''up_blocks.{max_depth - depth - 1}''' elif depth == 0: A_ = DEPTH_0_TO_LAYER[layer_num] A_ = f'''up_blocks.{max_depth - 1}''' if int(__UpperCamelCase ) > 3 else "down_blocks.0" if not string_left.startswith("." ): raise ValueError(f'''Naming error with {input_string} and string_left: {string_left}.''' ) A_ = string_left[1:] if "resnets" in new_layer: A_ = convert_resconv_naming(__UpperCamelCase ) elif "attentions" in new_layer: A_ = convert_attn_naming(__UpperCamelCase ) A_ = new_string_left if not isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = prefix + "." + new_layer + "." + string_left else: A_ = [prefix + "." + new_layer + "." + s for s in string_left] return new_string def __snake_case ( __UpperCamelCase : Optional[Any] ): """simple docstring""" A_ = {} for k, v in state_dict.items(): if k.endswith("kernel" ): # up- and downsample layers, don't have trainable weights continue A_ = rename(__UpperCamelCase ) # check if we need to transform from Conv => Linear for attention if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = transform_conv_attns(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) else: A_ = v return new_state_dict def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[str] ,__UpperCamelCase : str ): """simple docstring""" if len(__UpperCamelCase ) == 1: if len(v.shape ) == 3: # weight A_ = v[:, :, 0] else: # bias A_ = v else: # qkv matrices A_ = v.shape[0] A_ = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: A_ = v[i * single_shape : (i + 1) * single_shape, :, 0] else: A_ = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" A_ = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) A_ = args.model_path.split("/" )[-1].split("." 
)[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), f'''Make sure to provide one of the official model names {MODELS_MAP.keys()}''' A_ = download(__UpperCamelCase ) A_ = MODELS_MAP[model_name]["sample_rate"] A_ = MODELS_MAP[model_name]["sample_size"] A_ = Object() A_ = sample_size A_ = sample_rate A_ = 0 A_ = UNetaDModel(sample_size=__UpperCamelCase ,sample_rate=__UpperCamelCase ) A_ = diffusers_model.state_dict() A_ = DiffusionUncond(__UpperCamelCase ) orig_model.load_state_dict(torch.load(args.model_path ,map_location=__UpperCamelCase )["state_dict"] ) A_ = orig_model.diffusion_ema.eval() A_ = orig_model.state_dict() A_ = rename_orig_weights(__UpperCamelCase ) A_ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) A_ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(__UpperCamelCase ) == 0, f'''Problem with {renamed_minus_diffusers}''' assert all(k.endswith("kernel" ) for k in list(__UpperCamelCase ) ), f'''Problem with {diffusers_minus_renamed}''' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), f'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}''' if key == "time_proj.weight": A_ = value.squeeze() A_ = value diffusers_model.load_state_dict(__UpperCamelCase ) A_ = 100 A_ = 33 A_ = IPNDMScheduler(num_train_timesteps=__UpperCamelCase ) A_ = torch.manual_seed(__UpperCamelCase ) A_ = torch.randn([1, 2, config.sample_size] ,generator=__UpperCamelCase ).to(__UpperCamelCase ) A_ = torch.linspace(1 ,0 ,steps + 1 ,device=__UpperCamelCase )[:-1] A_ = get_crash_schedule(__UpperCamelCase ) A_ = DanceDiffusionPipeline(unet=__UpperCamelCase ,scheduler=__UpperCamelCase ) A_ = torch.manual_seed(33 ) A_ = pipe(num_inference_steps=__UpperCamelCase ,generator=__UpperCamelCase ).audios A_ = sampling.iplms_sample(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,{} ) A_ = generated.clamp(-1 ,1 ) A_ = (generated - audio).abs().sum() A_ = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("Diff sum" ,__UpperCamelCase ) print("Diff max" ,__UpperCamelCase ) assert diff_max < 1E-3, f'''Diff max: {diff_max} is too much :-/''' print(f'''Conversion for {model_name} successful!''' ) if __name__ == "__main__": __a :Optional[Any] = argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.') parser.add_argument( '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.' ) parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') __a :int = parser.parse_args() main(args)
312
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
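# Usage sketch (not part of the original file): the _LazyModule indirection
# above defers heavy submodule imports until first attribute access, e.g.
#
#   import transformers.models.mgp_str as mgp_str
#   config = mgp_str.MgpstrConfig()   # configuration_mgp_str is imported here,
#   print(type(config).__name__)      # on first access -> "MgpstrConfig"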
312
1
from __future__ import annotations

import json

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Extract the user profile dict from an Instagram page <script> tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username: str):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Return a dict of user information."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
312
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:
    """
    Return True if the string can be segmented into a space-separated
    sequence of one or more of the given words.
    """
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]

        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
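# A minimal check of word_break using the two classic examples:
# "applepenapple" segments over {"apple", "pen"}, while "catsandog"
# cannot be segmented over {"cats", "dog", "sand", "and", "cat"}.
if __name__ == "__main__":
    assert word_break("applepenapple", ["apple", "pen"]) is True
    assert word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False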
312
1
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        # Mean-pool the token embeddings using the attention mask, then
        # project into the shared multilingual CLIP space.
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
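# A smoke-test sketch of the classes above with random weights and a tiny
# config (the hyperparameter values here are made up for illustration; real
# M-CLIP checkpoints would be loaded with from_pretrained instead):
if __name__ == "__main__":
    config = MCLIPConfig(transformerDimSize=768, imageDimSize=640, vocab_size=100, num_hidden_layers=2)
    model = MultilingualCLIP(config)
    dummy_ids = torch.ones(2, 5, dtype=torch.long)
    dummy_mask = torch.ones(2, 5, dtype=torch.long)
    projected, token_embs = model(dummy_ids, dummy_mask)
    print(projected.shape)  # torch.Size([2, 640])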
312
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer __a :List[str] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __a :Union[str, Any] = { 'vocab_file': { 'google/electra-small-generator': ( 'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt' ), 'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt', 'google/electra-large-generator': ( 'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt' ), 'google/electra-small-discriminator': ( 'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt' ), 'google/electra-base-discriminator': ( 'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt' ), 'google/electra-large-discriminator': ( 'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'google/electra-small-generator': ( 'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json' ), 'google/electra-base-generator': ( 'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json' ), 'google/electra-large-generator': ( 'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json' ), 'google/electra-small-discriminator': ( 'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json' ), 'google/electra-base-discriminator': ( 'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json' ), 'google/electra-large-discriminator': ( 'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json' ), }, } __a :Optional[int] = { 'google/electra-small-generator': 512, 'google/electra-base-generator': 512, 'google/electra-large-generator': 512, 'google/electra-small-discriminator': 512, 'google/electra-base-discriminator': 512, 'google/electra-large-discriminator': 512, } __a :str = { 'google/electra-small-generator': {'do_lower_case': True}, 'google/electra-base-generator': {'do_lower_case': True}, 'google/electra-large-generator': {'do_lower_case': True}, 'google/electra-small-discriminator': {'do_lower_case': True}, 'google/electra-base-discriminator': {'do_lower_case': True}, 'google/electra-large-discriminator': {'do_lower_case': True}, } class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Tuple = VOCAB_FILES_NAMES _lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION _lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase : int = ElectraTokenizer def __init__( self : Tuple , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Any=True , UpperCAmelCase : Any="[UNK]" , UpperCAmelCase : Union[str, Any]="[SEP]" , UpperCAmelCase : List[Any]="[PAD]" , UpperCAmelCase : Union[str, Any]="[CLS]" , UpperCAmelCase : List[Any]="[MASK]" , UpperCAmelCase : List[str]=True , UpperCAmelCase : Any=None , **UpperCAmelCase : Union[str, Any] , ): super().__init__( UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , ) A_ = 
json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars ): A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) ) A_ = do_lower_case A_ = strip_accents A_ = tokenize_chinese_chars A_ = normalizer_class(**UpperCAmelCase ) A_ = do_lower_case def __A ( self : int , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any]=None ): A_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __A ( self : Union[str, Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ): A_ = [self.sep_token_id] A_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ): A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase ) return tuple(UpperCAmelCase )
312
1
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = "\nimport os\n"

IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"

DEEPLY_NESTED_IMPORT = "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"

TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"

TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"

MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"

EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"

GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"

MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"

MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
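# Quick illustration outside of pytest (the file name "sample_imports.py" is
# arbitrary): imports guarded by try/except are treated as optional
# dependencies and skipped, while the unconditional ones are reported.
if __name__ == "__main__":
    with open("sample_imports.py", "w") as f:
        f.write(MULTILINE_BOTH_IMPORT)
    print(get_imports("sample_imports.py"))  # expected: ["os"] — "bar"/"baz" are guarded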
312
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type

from .. import config
from ..utils import logging
from .formatting import (
    ArrowFormatter,
    CustomFormatter,
    Formatter,
    PandasFormatter,
    PythonFormatter,
    TensorFormatter,
    format_table,
    query_table,
)
from .np_formatter import NumpyFormatter


logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter class under a format type name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an error to raise when an unavailable format type (or one of its aliases) is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory function to get a Formatter given its type name and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
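# A short usage sketch of the registry above (behavior inferred from the
# definitions in this file): aliases resolve to the canonical name, and
# requesting a backend that is not installed raises the registered error.
#
#   get_format_type_from_alias("np")      # -> "numpy"
#   formatter = get_formatter("numpy")    # -> NumpyFormatter()
#   get_formatter("pt")                   # -> TorchFormatter() if torch is
#                                         #    installed, else raises ValueError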
312
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
312
1
def solution(n: int = 1000) -> int:
    """Evaluate the series sum(2 * a * ((a - 1) // 2)) for a in [3, n]."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
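# A tiny sanity check of the formula (the expected value is computed by hand
# from the summand itself: a=3..10 gives 6 + 8 + 20 + 24 + 42 + 48 + 72 + 80):
if __name__ == "__main__":
    assert solution(10) == 300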
312
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate


@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
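# A usage sketch (feature/label names are made up for illustration): aligning
# the template against concrete features copies the dataset's real ClassLabel,
# with its names, into the template's label schema.
#
#   features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
#   task = AudioClassification(audio_column="audio", label_column="labels")
#   aligned = task.align_with_features(features)
#   aligned.label_schema["labels"].names  # -> ["cat", "dog"]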
312
1
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , ) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue_model_parallelism.py', 'model_name_or_path': 'roberta-large', 'instance_type': 'ml.p3dn.24xlarge', 'results': {'train_runtime': 1_6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 1.2}, }, { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'roberta-large', 'instance_type': 'ml.p3dn.24xlarge', 'results': {'train_runtime': 1_6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 1.2}, }, ] ) class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : List[Any] ): if self.framework == "pytorch": subprocess.run( f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="utf-8" , check=UpperCAmelCase , ) assert hasattr(self , "env" ) def __A ( self : Tuple , UpperCAmelCase : List[str] ): # configuration for running training on smdistributed Model Parallel A_ = { "enabled": True, "processes_per_host": 8, } A_ = { "enabled": True, "parameters": { "microbatches": 4, "placement_strategy": "spread", "pipeline": "interleaved", "optimize": "speed", "partitions": 4, "ddp": True, }, } A_ = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options} A_ = "trainer" if self.script == "run_glue.py" else "smtrainer" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=UpperCAmelCase , instance_type=self.instance_type , debugger_hook_config=UpperCAmelCase , hyperparameters={ **self.env.hyperparameters, "model_name_or_path": self.model_name_or_path, "max_steps": 500, } , metric_definitions=self.env.metric_definitions , distribution=UpperCAmelCase , py_version="py36" , ) def __A ( self : str , UpperCAmelCase : Dict ): TrainingJobAnalytics(UpperCAmelCase ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' ) @parameterized.expand([(1,)] ) def __A ( self : List[str] , UpperCAmelCase : Dict ): # create estimator A_ = self.create_estimator(UpperCAmelCase ) # run training estimator.fit() # result dataframe A_ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis A_ = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] ) A_ = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping A_ = ( Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy ) assert all(t <= self.results["eval_loss"] for t in eval_loss ) # dump tests result into json file to share in PR with open(f'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile: json.dump({"train_time": train_runtime, 
"eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , UpperCAmelCase )
312
1
import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class _a ( snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : Dict = CanineTokenizer _lowerCamelCase : int = False def __A ( self : Any ): super().setUp() A_ = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __A ( self : Dict ): return CanineTokenizer.from_pretrained("google/canine-s" ) def __A ( self : List[str] , **UpperCAmelCase : Optional[int] ): A_ = self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase ) A_ = 1024 return tokenizer @require_torch def __A ( self : Optional[int] ): A_ = self.canine_tokenizer A_ = ["Life is like a box of chocolates.", "You never know what you're gonna get."] # fmt: off A_ = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0] # fmt: on A_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors="pt" ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) A_ = list(batch.input_ids.numpy()[0] ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def __A ( self : str ): A_ = self.canine_tokenizer A_ = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."] A_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors="pt" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("input_ids" , UpperCAmelCase ) self.assertIn("attention_mask" , UpperCAmelCase ) self.assertIn("token_type_ids" , UpperCAmelCase ) @require_torch def __A ( self : Dict ): A_ = self.canine_tokenizer A_ = [ "What's the weater?", "It's about 25 degrees.", ] A_ = tokenizer( text_target=UpperCAmelCase , max_length=32 , padding="max_length" , truncation=UpperCAmelCase , return_tensors="pt" ) self.assertEqual(32 , targets["input_ids"].shape[1] ) def __A ( self : Dict ): # safety check on max_len default value so we are sure the test works A_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test A_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc A_ = tempfile.mkdtemp() A_ = " He is very happy, UNwant\u00E9d,running" A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) tokenizer.save_pretrained(UpperCAmelCase ) A_ = tokenizer.__class__.from_pretrained(UpperCAmelCase ) A_ = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) shutil.rmtree(UpperCAmelCase ) A_ = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc A_ = tempfile.mkdtemp() A_ = " He is very happy, UNwant\u00E9d,running" A_ = tokenizer.additional_special_tokens # We can 
add a new special token for Canine as follows: A_ = chr(0xe0_07 ) additional_special_tokens.append(UpperCAmelCase ) tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} ) A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) tokenizer.save_pretrained(UpperCAmelCase ) A_ = tokenizer.__class__.from_pretrained(UpperCAmelCase ) A_ = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) self.assertIn(UpperCAmelCase , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) A_ = tokenizer.__class__.from_pretrained(UpperCAmelCase , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(UpperCAmelCase ) def __A ( self : Optional[Any] ): A_ = self.get_tokenizers(do_lower_case=UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): A_ , A_ = self.get_clean_sequence(UpperCAmelCase ) # a special token for Canine can be defined as follows: A_ = 0xe0_05 A_ = chr(UpperCAmelCase ) tokenizer.add_special_tokens({"cls_token": special_token} ) A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertEqual(len(UpperCAmelCase ) , 1 ) A_ = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=UpperCAmelCase ) A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertEqual(UpperCAmelCase , input_encoded + special_token_id ) A_ = tokenizer.decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase ) self.assertTrue(special_token not in decoded ) def __A ( self : Optional[Any] ): A_ = self.get_tokenizers(do_lower_case=UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): A_ = chr(0xe0_05 ) A_ = chr(0xe0_06 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=UpperCAmelCase ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. 
(in tokenization_utils_base.py) tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} ) A_ = tokenizer.tokenize(UpperCAmelCase ) A_ = tokenizer.tokenize(UpperCAmelCase ) self.assertEqual(len(UpperCAmelCase ) , 1 ) self.assertEqual(len(UpperCAmelCase ) , 1 ) self.assertEqual(token_a[0] , UpperCAmelCase ) self.assertEqual(token_a[0] , UpperCAmelCase ) @require_tokenizers def __A ( self : List[Any] ): A_ = self.get_tokenizers(do_lower_case=UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # a special token for Canine can be defined as follows: A_ = 0xe0_06 A_ = chr(UpperCAmelCase ) A_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase ) tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(UpperCAmelCase ) tokenizer.from_pretrained(UpperCAmelCase ) def __A ( self : int ): A_ = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(UpperCAmelCase ) with open(os.path.join(UpperCAmelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file: A_ = json.load(UpperCAmelCase ) with open(os.path.join(UpperCAmelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file: A_ = json.load(UpperCAmelCase ) # a special token for Canine can be defined as follows: A_ = 0xe0_06 A_ = chr(UpperCAmelCase ) A_ = [new_token_a] A_ = [new_token_a] with open(os.path.join(UpperCAmelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(UpperCAmelCase , UpperCAmelCase ) with open(os.path.join(UpperCAmelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(UpperCAmelCase , UpperCAmelCase ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files A_ = tokenizer_class.from_pretrained(UpperCAmelCase , extra_ids=0 ) self.assertIn(UpperCAmelCase , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) A_ = 0xe0_07 A_ = chr(UpperCAmelCase ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained A_ = [AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase )] A_ = tokenizer_class.from_pretrained( UpperCAmelCase , additional_special_tokens=UpperCAmelCase , extra_ids=0 ) self.assertIn(UpperCAmelCase , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def __A ( self : Tuple ): A_ = self.get_tokenizers(do_lower_case=UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): A_ = "hello world" if self.space_between_special_tokens: A_ = "[CLS] hello world [SEP]" else: A_ = input A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) A_ = tokenizer.decode(UpperCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(UpperCAmelCase , [output, output.lower()] ) def __A ( self : Any ): A_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): A_ = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] A_ = "a" A_ = ord(UpperCAmelCase ) for attr in attributes_list: setattr(UpperCAmelCase , attr + "_id" , UpperCAmelCase ) self.assertEqual(getattr(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(getattr(UpperCAmelCase , attr + "_id" ) , UpperCAmelCase ) setattr(UpperCAmelCase , attr + "_id" , UpperCAmelCase ) self.assertEqual(getattr(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(getattr(UpperCAmelCase , attr + "_id" ) , UpperCAmelCase ) setattr(UpperCAmelCase , "additional_special_tokens_ids" , [] ) self.assertListEqual(getattr(UpperCAmelCase , "additional_special_tokens" ) , [] ) self.assertListEqual(getattr(UpperCAmelCase , "additional_special_tokens_ids" ) , [] ) A_ = 0xe0_06 A_ = chr(UpperCAmelCase ) setattr(UpperCAmelCase , "additional_special_tokens_ids" , [additional_special_token_id] ) self.assertListEqual(getattr(UpperCAmelCase , "additional_special_tokens" ) , [additional_special_token] ) self.assertListEqual(getattr(UpperCAmelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] ) def __A ( self : int ): pass def __A ( self : List[str] ): pass def __A ( self : Tuple ): pass def __A ( self : Union[str, Any] ): pass def __A ( self : List[str] ): pass def __A ( self : List[Any] ): pass def __A ( self : Any ): pass def __A ( self : Any ): pass
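# Illustrative sketch (not the CANINE library code): the hard-coded ids in the tests above
# follow from CANINE being vocabulary-free. Ids are Unicode code points, with [CLS] = 0xE000
# (57344) and [SEP] = 0xE001 (57345) taken from the private-use area, and 0 used for padding.
def canine_like_encode(text: str, pad_to: int) -> list[int]:
    ids = [0xE000] + [ord(ch) for ch in text] + [0xE001]
    return ids + [0] * (pad_to - len(ids))

# Matches the expected ids in the first test above: [CLS], "L", "i", "f", "e", ...
assert canine_like_encode("Life is like a box of chocolates.", 39)[:5] == [57344, 76, 105, 102, 101]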
312
import cva
import numpy as np


class _a :
    """simple docstring"""

    def __init__( self : Any , UpperCAmelCase : float , UpperCAmelCase : int ):
        if k in (0.04, 0.06):
            A_ = k
            A_ = window_size
        else:
            raise ValueError("invalid k value" )

    def __str__( self : Optional[Any] ):
        return str(self.k )

    def __A ( self : int , UpperCAmelCase : str ):
        A_ = cva.imread(UpperCAmelCase , 0 )
        A_ , A_ = img.shape
        A_ = []
        A_ = img.copy()
        A_ = cva.cvtColor(UpperCAmelCase , cva.COLOR_GRAY2RGB )
        A_ , A_ = np.gradient(UpperCAmelCase )
        A_ = dx**2
        A_ = dy**2
        A_ = dx * dy
        A_ = self.window_size // 2
        for y in range(UpperCAmelCase , h - offset ):
            for x in range(UpperCAmelCase , w - offset ):
                A_ = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum()
                A_ = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum()
                A_ = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum()
                A_ = (wxx * wyy) - (wxy**2)
                A_ = wxx + wyy
                A_ = det - self.k * (trace**2)
                # corner response threshold; the 0.5 below can be tuned
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list


if __name__ == "__main__":
    __a :List[str] = HarrisCorner(0.04, 3)
    __a , __a :str = edge_detect.detect('path_to_image')
    cva.imwrite('detect.png', color_img)
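# To see the Harris response in isolation: each window's gradients are summed into a 2x2
# structure matrix M and scored with r = det(M) - k * trace(M)^2. A tiny sketch with
# synthetic gradient values (illustrative numbers only):
import numpy as np

dx = np.array([[1.0, 0.0], [1.0, 0.0]])
dy = np.array([[0.0, 1.0], [0.0, 1.0]])
wxx, wyy, wxy = (dx * dx).sum(), (dy * dy).sum(), (dx * dy).sum()
r = (wxx * wyy - wxy**2) - 0.04 * (wxx + wyy) ** 2
assert abs(r - 3.36) < 1e-9  # gradients vary in both directions, so the response is large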
312
1
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Any = 'ClapFeatureExtractor' _lowerCamelCase : Optional[int] = ('RobertaTokenizer', 'RobertaTokenizerFast') def __init__( self : str , UpperCAmelCase : Dict , UpperCAmelCase : List[str] ): super().__init__(UpperCAmelCase , UpperCAmelCase ) def __call__( self : List[str] , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Optional[Any]=None , **UpperCAmelCase : Tuple ): A_ = kwargs.pop("sampling_rate" , UpperCAmelCase ) if text is None and audios is None: raise ValueError("You have to specify either text or audios. Both cannot be none." ) if text is not None: A_ = self.tokenizer(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ) if audios is not None: A_ = self.feature_extractor( UpperCAmelCase , sampling_rate=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ) if text is not None and audios is not None: A_ = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase ) def __A ( self : Tuple , *UpperCAmelCase : Any , **UpperCAmelCase : str ): return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : Any , *UpperCAmelCase : List[str] , **UpperCAmelCase : Optional[Any] ): return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase ) @property def __A ( self : Any ): A_ = self.tokenizer.model_input_names A_ = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
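# Hypothetical usage sketch for the processor above; the checkpoint id and the 48 kHz
# sampling rate are assumptions, not taken from this file, and the call needs network
# access to download the model files.
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")  # assumed checkpoint
audio = np.zeros(48_000, dtype=np.float32)  # one second of silence as a stand-in waveform
inputs = processor(text=["a dog barking"], audios=[audio], sampling_rate=48_000, return_tensors="pt")
# `inputs` now carries the tokenizer fields plus `input_features` from the feature extractor.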
312
def __snake_case ( __UpperCamelCase : int = 1000 ):
    """simple docstring"""
    return sum(2 * a * ((a - 1) // 2) for a in range(3 ,__UpperCamelCase + 1 ) )


if __name__ == "__main__":
    print(solution())
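# This appears to be the closed form for Project Euler problem 120: for a >= 3 the maximum
# of ((a - 1)**n + (a + 1)**n) mod a**2 over n is 2 * a * ((a - 1) // 2). A brute-force
# cross-check sketch over small a (the expression is periodic in n, so n up to 2a suffices):
def r_max_bruteforce(a: int) -> int:
    m = a * a
    return max(((a - 1) ** n + (a + 1) ** n) % m for n in range(1, 2 * a + 1))

assert all(r_max_bruteforce(a) == 2 * a * ((a - 1) // 2) for a in range(3, 60))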
312
1
def __snake_case ( __UpperCamelCase : list ): """simple docstring""" if len(__UpperCamelCase ) < 2: return collection def circle_sort_util(__UpperCamelCase : list ,__UpperCamelCase : int ,__UpperCamelCase : int ) -> bool: A_ = False if low == high: return swapped A_ = low A_ = high while left < right: if collection[left] > collection[right]: A_ , A_ = ( collection[right], collection[left], ) A_ = True left += 1 right -= 1 if left == right and collection[left] > collection[right + 1]: A_ , A_ = ( collection[right + 1], collection[left], ) A_ = True A_ = low + int((high - low) / 2 ) A_ = circle_sort_util(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) A_ = circle_sort_util(__UpperCamelCase ,mid + 1 ,__UpperCamelCase ) return swapped or left_swap or right_swap A_ = True while is_not_sorted is True: A_ = circle_sort_util(__UpperCamelCase ,0 ,len(__UpperCamelCase ) - 1 ) return collection if __name__ == "__main__": __a :Tuple = input('Enter numbers separated by a comma:\n').strip() __a :List[str] = [int(item) for item in user_input.split(',')] print(circle_sort(unsorted))
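# Quick property-test sketch for the sort above, checked against Python's built-in sorted
# (uses the call-site name `circle_sort` from the __main__ block; assumes the function is
# available under that name):
import random

for _ in range(200):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 25))]
    assert circle_sort(list(data)) == sorted(data)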
312
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Union[str, Any] = ['image_processor', 'tokenizer'] _lowerCamelCase : Tuple = 'OwlViTImageProcessor' _lowerCamelCase : List[Any] = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self : Optional[Any] , UpperCAmelCase : int=None , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : Any ): A_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCAmelCase , ) A_ = kwargs.pop("feature_extractor" ) A_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(UpperCAmelCase , UpperCAmelCase ) def __call__( self : Optional[int] , UpperCAmelCase : List[str]=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Dict="max_length" , UpperCAmelCase : Optional[Any]="np" , **UpperCAmelCase : Optional[int] ): if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." ) if text is not None: if isinstance(UpperCAmelCase , UpperCAmelCase ) or (isinstance(UpperCAmelCase , UpperCAmelCase ) and not isinstance(text[0] , UpperCAmelCase )): A_ = [self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )] elif isinstance(UpperCAmelCase , UpperCAmelCase ) and isinstance(text[0] , UpperCAmelCase ): A_ = [] # Maximum number of queries across batch A_ = max([len(UpperCAmelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(UpperCAmelCase ) != max_num_queries: A_ = t + [" "] * (max_num_queries - len(UpperCAmelCase )) A_ = self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ) encodings.append(UpperCAmelCase ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": A_ = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) A_ = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp A_ = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) A_ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch A_ = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 ) A_ = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf A_ = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 ) A_ = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 ) else: raise ValueError("Target return tensor type could not be returned" ) A_ = BatchEncoding() A_ = input_ids A_ = attention_mask if query_images is not None: A_ = BatchEncoding() A_ = self.image_processor( 
UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ).pixel_values A_ = query_pixel_values if images is not None: A_ = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ) if text is not None and images is not None: A_ = image_features.pixel_values return encoding elif query_images is not None and images is not None: A_ = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase ) def __A ( self : Optional[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : List[Any] ): return self.image_processor.post_process(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : str , *UpperCAmelCase : str , **UpperCAmelCase : Union[str, Any] ): return self.image_processor.post_process_object_detection(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : List[Any] , *UpperCAmelCase : int , **UpperCAmelCase : int ): return self.image_processor.post_process_image_guided_detection(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : List[Any] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Any ): return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : Tuple , *UpperCAmelCase : Dict , **UpperCAmelCase : str ): return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase ) @property def __A ( self : Union[str, Any] ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase , ) return self.image_processor_class @property def __A ( self : Optional[Any] ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase , ) return self.image_processor
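# The query-padding branch above is easy to check in isolation; a minimal sketch with
# illustrative queries (every sample is padded to the largest query count in the batch):
queries = [["a cat"], ["a dog", "a remote control"]]
max_num_queries = max(len(q) for q in queries)
padded = [q + [" "] * (max_num_queries - len(q)) for q in queries]
assert padded == [["a cat", " "], ["a dog", "a remote control"]]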
312
1
from argparse import ArgumentParser from .env import EnvironmentCommand def __snake_case ( ): """simple docstring""" A_ = ArgumentParser("Diffusers CLI tool" ,usage="diffusers-cli <command> [<args>]" ) A_ = parser.add_subparsers(help="diffusers-cli command helpers" ) # Register commands EnvironmentCommand.register_subcommand(__UpperCamelCase ) # Let's go A_ = parser.parse_args() if not hasattr(__UpperCamelCase ,"func" ): parser.print_help() exit(1 ) # Run A_ = args.func(__UpperCamelCase ) service.run() if __name__ == "__main__": main()
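# Self-contained sketch of the same register-subcommand pattern (names are illustrative,
# not the diffusers API): each command registers a parser and a factory under `func`.
from argparse import ArgumentParser


class HelloCommand:
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser("hello")
        parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello")


parser = ArgumentParser("demo-cli")
subparsers = parser.add_subparsers(help="demo command helpers")
HelloCommand.register_subcommand(subparsers)
args = parser.parse_args(["hello"])
args.func(args).run()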
312
from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class _a ( snake_case_ , snake_case_ ): """simple docstring""" @register_to_config def __init__( self : Dict , UpperCAmelCase : int = 768 , ): super().__init__() A_ = nn.Parameter(torch.zeros(1 , UpperCAmelCase ) ) A_ = nn.Parameter(torch.ones(1 , UpperCAmelCase ) ) def __A ( self : str , UpperCAmelCase : Optional[Union[str, torch.device]] = None , UpperCAmelCase : Optional[torch.dtype] = None , ): A_ = nn.Parameter(self.mean.to(UpperCAmelCase ).to(UpperCAmelCase ) ) A_ = nn.Parameter(self.std.to(UpperCAmelCase ).to(UpperCAmelCase ) ) return self def __A ( self : Dict , UpperCAmelCase : List[Any] ): A_ = (embeds - self.mean) * 1.0 / self.std return embeds def __A ( self : int , UpperCAmelCase : int ): A_ = (embeds * self.std) + self.mean return embeds
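# The scale/unscale pair above is an exact affine round trip; a quick check sketch:
import torch

mean, std = torch.zeros(1, 4), torch.full((1, 4), 2.0)
embeds = torch.randn(3, 4)
scaled = (embeds - mean) * 1.0 / std
assert torch.allclose(scaled * std + mean, embeds)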
312
1
import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __a :List[Any] = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class _a ( snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : int = DebertaVaTokenizer _lowerCamelCase : List[Any] = DebertaVaTokenizerFast _lowerCamelCase : Tuple = True _lowerCamelCase : Union[str, Any] = True def __A ( self : Tuple ): super().setUp() # We have a SentencePiece fixture for testing A_ = DebertaVaTokenizer(UpperCAmelCase , unk_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def __A ( self : List[str] , UpperCAmelCase : str ): A_ = "this is a test" A_ = "this is a test" return input_text, output_text def __A ( self : Tuple ): A_ = "<pad>" A_ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase ) def __A ( self : Any ): A_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "[PAD]" ) self.assertEqual(len(UpperCAmelCase ) , 30001 ) def __A ( self : Any ): self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def __A ( self : List[str] ): # fmt: off A_ = " \tHeLLo!how \n Are yoU? " A_ = ["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on A_ = DebertaVaTokenizer(UpperCAmelCase , do_lower_case=UpperCAmelCase ) A_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = DebertaVaTokenizerFast(UpperCAmelCase , do_lower_case=UpperCAmelCase ) A_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def __A ( self : Tuple ): pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def __A ( self : Optional[int] ): pass def __A ( self : Any ): # fmt: off A_ = "I was born in 92000, and this is falsé." A_ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on A_ = DebertaVaTokenizer(UpperCAmelCase , split_by_punct=UpperCAmelCase ) A_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = DebertaVaTokenizerFast(UpperCAmelCase , split_by_punct=UpperCAmelCase ) A_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : Optional[Any] ): # fmt: off A_ = "I was born in 92000, and this is falsé." 
A_ = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on A_ = DebertaVaTokenizer(UpperCAmelCase , do_lower_case=UpperCAmelCase , split_by_punct=UpperCAmelCase ) A_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = DebertaVaTokenizerFast(UpperCAmelCase , do_lower_case=UpperCAmelCase , split_by_punct=UpperCAmelCase ) A_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : int ): # fmt: off A_ = "I was born in 92000, and this is falsé." A_ = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on A_ = DebertaVaTokenizer(UpperCAmelCase , do_lower_case=UpperCAmelCase , split_by_punct=UpperCAmelCase ) A_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = DebertaVaTokenizerFast(UpperCAmelCase , do_lower_case=UpperCAmelCase , split_by_punct=UpperCAmelCase ) A_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : Any ): # fmt: off A_ = "I was born in 92000, and this is falsé." A_ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on A_ = DebertaVaTokenizer(UpperCAmelCase , do_lower_case=UpperCAmelCase , split_by_punct=UpperCAmelCase ) A_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = DebertaVaTokenizerFast(UpperCAmelCase , do_lower_case=UpperCAmelCase , split_by_punct=UpperCAmelCase ) A_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : Tuple ): # fmt: off A_ = " \tHeLLo!how \n Are yoU? " A_ = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on A_ = DebertaVaTokenizer(UpperCAmelCase , do_lower_case=UpperCAmelCase , split_by_punct=UpperCAmelCase ) A_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = DebertaVaTokenizerFast(UpperCAmelCase , do_lower_case=UpperCAmelCase , split_by_punct=UpperCAmelCase ) A_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : str ): A_ = self.get_tokenizer() A_ = self.get_rust_tokenizer() A_ = "I was born in 92000, and this is falsé." 
A_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) ) A_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) A_ = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = self.get_rust_tokenizer() A_ = tokenizer.encode(UpperCAmelCase ) A_ = rust_tokenizer.encode(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : Tuple ): A_ = "This is a test" A_ = [13, 1, 4398, 25, 21, 1289] A_ = ["▁", "T", "his", "▁is", "▁a", "▁test"] A_ = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] A_ = DebertaVaTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase ) A_ = DebertaVaTokenizerFast(UpperCAmelCase , keep_accents=UpperCAmelCase ) A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = tokenizer.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = rust_tokenizer.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = rust_tokenizer.convert_ids_to_tokens(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) # fmt: off A_ = "I was born in 92000, and this is falsé." A_ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] A_ = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] A_ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = tokenizer.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = rust_tokenizer.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = rust_tokenizer.convert_ids_to_tokens(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : List[str] ): A_ = DebertaVaTokenizer(UpperCAmelCase ) A_ = tokenizer.encode("sequence builders" ) A_ = tokenizer.encode("multi-sequence build" ) A_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase ) A_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCAmelCase ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCAmelCase , ) @slow def __A ( self : List[str] ): # fmt: off A_ = {"input_ids": [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 
20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
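# The "▁" (U+2581) characters in the expected token lists above are SentencePiece
# word-boundary markers; detokenization is essentially a replace-and-strip, as this
# sketch shows:
tokens = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000"]
text = "".join(tokens).replace("\u2581", " ").strip()
assert text == "I was born in 92000"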
312
from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def __snake_case ( __UpperCamelCase : NDArray[floataa] ,__UpperCamelCase : NDArray[floataa] ,__UpperCamelCase : list[int] ,__UpperCamelCase : int ,): """simple docstring""" A_ , A_ = coefficient_matrix.shape A_ , A_ = constant_matrix.shape if rowsa != colsa: A_ = f'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}''' raise ValueError(__UpperCamelCase ) if colsa != 1: A_ = f'''Constant matrix must be nx1 but received {rowsa}x{colsa}''' raise ValueError(__UpperCamelCase ) if rowsa != rowsa: A_ = ( "Coefficient and constant matrices dimensions must be nxn and nx1 but " f'''received {rowsa}x{colsa} and {rowsa}x{colsa}''' ) raise ValueError(__UpperCamelCase ) if len(__UpperCamelCase ) != rowsa: A_ = ( "Number of initial values must be equal to number of rows in coefficient " f'''matrix but received {len(__UpperCamelCase )} and {rowsa}''' ) raise ValueError(__UpperCamelCase ) if iterations <= 0: raise ValueError("Iterations must be at least 1" ) A_ = np.concatenate( (coefficient_matrix, constant_matrix) ,axis=1 ) A_ , A_ = table.shape strictly_diagonally_dominant(__UpperCamelCase ) # Iterates the whole matrix for given number of times for _ in range(__UpperCamelCase ): A_ = [] for row in range(__UpperCamelCase ): A_ = 0 for col in range(__UpperCamelCase ): if col == row: A_ = table[row][col] elif col == cols - 1: A_ = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] A_ = (temp + val) / denom new_val.append(__UpperCamelCase ) A_ = new_val return [float(__UpperCamelCase ) for i in new_val] def __snake_case ( __UpperCamelCase : NDArray[floataa] ): """simple docstring""" A_ , A_ = table.shape A_ = True for i in range(0 ,__UpperCamelCase ): A_ = 0 for j in range(0 ,cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError("Coefficient matrix is not strictly diagonally dominant" ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
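# Usage sketch for the Jacobi iteration above, rewritten independently in vectorized
# NumPy so it can be checked end to end (the matrix is strictly diagonally dominant,
# so the iteration converges):
import numpy as np

A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
x = np.array([0.5, -0.5])
D = np.diag(A)
R = A - np.diag(D)
for _ in range(25):
    x = (b - R @ x) / D  # one Jacobi sweep
assert np.allclose(A @ x, b, atol=1e-6)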
312
1
import string def __snake_case ( __UpperCamelCase : str ): """simple docstring""" for key in range(len(string.ascii_uppercase ) ): A_ = "" for symbol in message: if symbol in string.ascii_uppercase: A_ = string.ascii_uppercase.find(__UpperCamelCase ) A_ = num - key if num < 0: A_ = num + len(string.ascii_uppercase ) A_ = translated + string.ascii_uppercase[num] else: A_ = translated + symbol print(f'''Decryption using Key #{key}: {translated}''' ) def __snake_case ( ): """simple docstring""" A_ = input("Encrypted message: " ) A_ = message.upper() decrypt(__UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
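# Companion sketch: encrypting with a known key produces input that the brute-force
# decrypt above can recover (illustrative helper, not part of the original module):
import string


def caesar_encrypt(message: str, key: int) -> str:
    out = []
    for symbol in message.upper():
        if symbol in string.ascii_uppercase:
            out.append(string.ascii_uppercase[(string.ascii_uppercase.index(symbol) + key) % 26])
        else:
            out.append(symbol)
    return "".join(out)


assert caesar_encrypt("Hello", 3) == "KHOOR"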
312
from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def __snake_case ( ): """simple docstring""" A_ = { "repo_name": ["test_repo1", "test_repo2", "test_repo3"], "path": ["test_1.py", "test_2.py", "unit_test.py"], "content": ["a " * 20, "a " * 30, "b " * 7], } A_ = Dataset.from_dict(__UpperCamelCase ) return dataset class _a ( snake_case_ ): """simple docstring""" def __A ( self : Union[str, Any] ): A_ = get_dataset() A_ = make_duplicate_clusters(UpperCAmelCase , 0.85 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def __A ( self : List[Any] ): A_ = get_dataset() A_ , A_ = deduplicate_dataset(UpperCAmelCase ) self.assertEqual(len(UpperCAmelCase ) , 2 ) print(UpperCAmelCase ) self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 ) self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , UpperCAmelCase )
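# For context, the 0.85 threshold above approximates a Jaccard similarity over token
# sets, which MinHash estimates; the exact quantity looks like this simplified sketch:
def jaccard(a: str, b: str) -> float:
    sa, sb = set(a.split()), set(b.split())
    return len(sa & sb) / len(sa | sb)


assert jaccard("a a a", "a b") == 0.5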
312
1
import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) __a :Any = [ 'cross_validation.py', 'gradient_accumulation.py', 'local_sgd.py', 'multi_process_metrics.py', 'memory.py', 'automatic_gradient_accumulation.py', 'fsdp_with_peak_mem_tracking.py', 'deepspeed_with_config_support.py', 'megatron_lm_gpt_pretraining.py', ] class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : bool , UpperCAmelCase : str = None , UpperCAmelCase : list = None ): A_ = None A_ = os.path.abspath(os.path.join("examples" , "by_feature" ) ) A_ = os.path.abspath("examples" ) for item in os.listdir(UpperCAmelCase ): if item not in EXCLUDE_EXAMPLES: A_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) if os.path.isfile(UpperCAmelCase ) and ".py" in item_path: with self.subTest( tested_script=UpperCAmelCase , feature_script=UpperCAmelCase , tested_section="main()" if parser_only else "training_function()" , ): A_ = compare_against_test( os.path.join(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) A_ = "\n".join(UpperCAmelCase ) if special_strings is not None: for string in special_strings: A_ = diff.replace(UpperCAmelCase , "" ) self.assertEqual(UpperCAmelCase , "" ) def __A ( self : Optional[int] ): self.one_complete_example("complete_nlp_example.py" , UpperCAmelCase ) self.one_complete_example("complete_nlp_example.py" , UpperCAmelCase ) def __A ( self : str ): A_ = os.path.abspath(os.path.join("examples" , "cv_example.py" ) ) A_ = [ " " * 16 + "{\n\n", " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n", " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n", " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n", " " * 20 + "\"epoch\": epoch,\n\n", " " * 16 + "},\n\n", " " * 16 + "step=epoch,\n", " " * 12, " " * 8 + "for step, batch in enumerate(active_dataloader):\n", ] self.one_complete_example("complete_cv_example.py" , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) self.one_complete_example("complete_cv_example.py" , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) @mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} ) class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Tuple = False @classmethod def __A ( cls : List[Any] ): super().setUpClass() A_ = tempfile.mkdtemp() A_ = os.path.join(cls._tmpdir , "default_config.yml" ) write_basic_config(save_location=cls.configPath ) A_ = ["accelerate", "launch", "--config_file", cls.configPath] @classmethod def __A ( cls : Tuple ): super().tearDownClass() shutil.rmtree(cls._tmpdir ) def __A ( self : Optional[Any] ): A_ = f''' examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} '''.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) ) def __A ( self : List[str] ): A_ = f''' examples/by_feature/checkpointing.py --checkpointing_steps 1 --output_dir {self.tmpdir} '''.split() A_ = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 
"step_2" ) ) ) def __A ( self : Any ): A_ = f''' examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )} '''.split() A_ = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase ) self.assertNotIn("epoch 0:" , UpperCAmelCase ) self.assertIn("epoch 1:" , UpperCAmelCase ) def __A ( self : Tuple ): A_ = f''' examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )} '''.split() A_ = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase ) if torch.cuda.is_available(): A_ = torch.cuda.device_count() else: A_ = 1 if num_processes > 1: self.assertNotIn("epoch 0:" , UpperCAmelCase ) self.assertIn("epoch 1:" , UpperCAmelCase ) else: self.assertIn("epoch 0:" , UpperCAmelCase ) self.assertIn("epoch 1:" , UpperCAmelCase ) @slow def __A ( self : Union[str, Any] ): A_ = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split() with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ): A_ = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase ) A_ = re.findall("({.+})" , UpperCAmelCase ) A_ = [r for r in results if "accuracy" in r][-1] A_ = ast.literal_eval(UpperCAmelCase ) self.assertGreaterEqual(results["accuracy"] , 0.75 ) def __A ( self : Any ): A_ = ["examples/by_feature/multi_process_metrics.py"] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def __A ( self : int ): with tempfile.TemporaryDirectory() as tmpdir: A_ = f''' examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} '''.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase , "tracking" ) ) ) def __A ( self : Optional[int] ): A_ = ["examples/by_feature/gradient_accumulation.py"] run_command(self._launch_args + testargs ) def __A ( self : Optional[int] ): A_ = ["examples/by_feature/local_sgd.py"] run_command(self._launch_args + testargs )
312
import os from typing import Dict, List, Tuple, TypeVar, Union __a :Any = TypeVar('T') __a :Union[str, Any] = Union[List[T], Tuple[T, ...]] __a :List[str] = Union[T, List[T], Dict[str, T]] __a :Any = Union[str, bytes, os.PathLike]
312
1
import re
import time
from typing import Optional

import IPython.display as disp

from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length


def __snake_case ( __UpperCamelCase : Tuple ):
    """simple docstring"""
    A_ = int(__UpperCamelCase )
    A_ , A_ , A_ = t // 3600, (t // 60) % 60, t % 60
    return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''


def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Tuple ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Dict=300 ):
    """simple docstring"""
    return f'''
    <div>
      {prefix}
      <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
      {label}
    </div>
    '''


def __snake_case ( __UpperCamelCase : Union[str, Any] ):
    """simple docstring"""
    A_ = "<table border=\"1\" class=\"dataframe\">\n"
    html_code += """ <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f''' <th>{i}</th>\n'''
    html_code += " </tr>\n </thead>\n <tbody>\n"
    for line in items[1:]:
        html_code += " <tr>\n"
        for elt in line:
            A_ = f'''{elt:.6f}''' if isinstance(__UpperCamelCase ,__UpperCamelCase ) else str(__UpperCamelCase )
            html_code += f''' <td>{elt}</td>\n'''
        html_code += " </tr>\n"
    html_code += " </tbody>\n</table><p>"
    return html_code


class _a :
    """simple docstring"""

    _lowerCamelCase : List[str] = 5
    _lowerCamelCase : List[str] = 0.2

    def __init__( self : int , UpperCAmelCase : int , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Optional["NotebookTrainingTracker"] = None , UpperCAmelCase : int = 300 , ):
        A_ = total
        A_ = "" if prefix is None else prefix
        A_ = leave
        A_ = parent
        A_ = width
        A_ = None
        A_ = None
        A_ = None

    def __A ( self : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : bool = False , UpperCAmelCase : str = None ):
        A_ = value
        if comment is not None:
            A_ = comment
        if self.last_value is None:
            A_ = A_ = time.time()
            A_ = A_ = value
            A_ = A_ = None
            A_ = self.warmup
            A_ = 1
            self.update_bar(UpperCAmelCase )
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
            if self.first_calls > 0:
                self.first_calls -= 1
            A_ = time.time()
            A_ = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value: A_ = self.elapsed_time / (value - self.start_value) else: A_ = None if value >= self.total: A_ = self.total A_ = None if not self.leave: self.close() elif self.average_time_per_item is not None: A_ = self.average_time_per_item * (self.total - value) self.update_bar(UpperCAmelCase ) A_ = value A_ = current_time if self.average_time_per_item is None: A_ = 1 else: A_ = max(int(self.update_every / self.average_time_per_item ) , 1 ) def __A ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Any=None ): A_ = " " * (len(str(self.total ) ) - len(str(UpperCAmelCase ) )) + str(UpperCAmelCase ) if self.elapsed_time is None: A_ = f'''[{spaced_value}/{self.total} : < :''' elif self.predicted_remaining is None: A_ = f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}''' else: A_ = ( f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <''' f''' {format_time(self.predicted_remaining )}''' ) self.label += f''', {1/self.average_time_per_item:.2f} it/s''' self.label += "]" if self.comment is None or len(self.comment ) == 0 else f''', {self.comment}]''' self.display() def __A ( self : str ): A_ = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: A_ = disp.display(disp.HTML(self.html_code ) , display_id=UpperCAmelCase ) else: self.output.update(disp.HTML(self.html_code ) ) def __A ( self : Dict ): if self.parent is None and self.output is not None: self.output.update(disp.HTML("" ) ) class _a ( snake_case_ ): """simple docstring""" def __init__( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int=None ): super().__init__(UpperCAmelCase ) A_ = None if column_names is None else [column_names] A_ = None def __A ( self : Any ): A_ = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: A_ = disp.display(disp.HTML(self.html_code ) , display_id=UpperCAmelCase ) else: self.output.update(disp.HTML(self.html_code ) ) def __A ( self : List[Any] , UpperCAmelCase : Optional[Any] ): if self.inner_table is None: A_ = [list(values.keys() ), list(values.values() )] else: A_ = self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(UpperCAmelCase ) A_ = columns self.inner_table.append([values[c] for c in columns] ) def __A ( self : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict=None , UpperCAmelCase : List[Any]=300 ): A_ = NotebookProgressBar(UpperCAmelCase , prefix=UpperCAmelCase , parent=self , width=UpperCAmelCase ) return self.child_bar def __A ( self : List[Any] ): A_ = None self.display() class _a ( snake_case_ ): """simple docstring""" def __init__( self : Union[str, Any] ): A_ = None A_ = None A_ = False def __A ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[int] ): A_ = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step" A_ = 0 A_ = 0 A_ = [self.first_column] + ["Training Loss"] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append("Validation Loss" ) A_ = 
NotebookTrainingTracker(state.max_steps , UpperCAmelCase ) def __A ( self : Optional[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : int , **UpperCAmelCase : Dict ): A_ = int(state.epoch ) if int(state.epoch ) == state.epoch else f'''{state.epoch:.2f}''' self.training_tracker.update( state.global_step + 1 , comment=f'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , ) A_ = False def __A ( self : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[str]=None , **UpperCAmelCase : Optional[Any] ): if not has_length(UpperCAmelCase ): return if self.prediction_bar is None: if self.training_tracker is not None: A_ = self.training_tracker.add_child(len(UpperCAmelCase ) ) else: A_ = NotebookProgressBar(len(UpperCAmelCase ) ) self.prediction_bar.update(1 ) else: self.prediction_bar.update(self.prediction_bar.value + 1 ) def __A ( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Dict , **UpperCAmelCase : List[str] ): if self.prediction_bar is not None: self.prediction_bar.close() A_ = None def __A ( self : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Optional[int]=None , **UpperCAmelCase : Optional[Any] ): # Only for when there is no evaluation if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: A_ = {"Training Loss": logs["loss"]} # First column is necessarily Step sine we're not in epoch eval strategy A_ = state.global_step self.training_tracker.write_line(UpperCAmelCase ) def __A ( self : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : Dict=None , **UpperCAmelCase : Optional[Any] ): if self.training_tracker is not None: A_ = {"Training Loss": "No log", "Validation Loss": "No log"} for log in reversed(state.log_history ): if "loss" in log: A_ = log["loss"] break if self.first_column == "Epoch": A_ = int(state.epoch ) else: A_ = state.global_step A_ = "eval" for k in metrics: if k.endswith("_loss" ): A_ = re.sub(R"\_loss$" , "" , UpperCAmelCase ) A_ = metrics.pop("total_flos" , UpperCAmelCase ) A_ = metrics.pop("epoch" , UpperCAmelCase ) A_ = metrics.pop(f'''{metric_key_prefix}_runtime''' , UpperCAmelCase ) A_ = metrics.pop(f'''{metric_key_prefix}_samples_per_second''' , UpperCAmelCase ) A_ = metrics.pop(f'''{metric_key_prefix}_steps_per_second''' , UpperCAmelCase ) A_ = metrics.pop(f'''{metric_key_prefix}_jit_compilation_time''' , UpperCAmelCase ) for k, v in metrics.items(): if k == f'''{metric_key_prefix}_loss''': A_ = v else: A_ = k.split("_" ) A_ = " ".join([part.capitalize() for part in splits[1:]] ) A_ = v self.training_tracker.write_line(UpperCAmelCase ) self.training_tracker.remove_child() A_ = None # Evaluation takes a long time so we should force the next update. A_ = True def __A ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , **UpperCAmelCase : Any ): self.training_tracker.update( state.global_step , comment=f'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=UpperCAmelCase ) A_ = None
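# Quick check of the time formatter that opens this file, reimplemented under an
# illustrative name (the original is shown with obfuscated identifiers):
def format_time_sketch(t: float) -> str:
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


assert format_time_sketch(75) == "01:15"
assert format_time_sketch(3661) == "1:01:01"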
312
__a :Dict = '0.18.2' from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, 
LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, 
FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
312
1
from __future__ import annotations

import math

__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"


def convert_to_2d(
    x: float, y: float, z: float, scale: float, distance: float
) -> tuple[float, float]:
    """Convert a 3D point (x, y, z) into its 2D perspective projection,
    given a viewing distance and a scale factor."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(
    x: float, y: float, z: float, axis: str, angle: float
) -> tuple[float, float, float]:
    """Rotate the 3D point (x, y, z) about the given axis ('x', 'y' or 'z')
    by the given angle in degrees and return the new coordinates."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    # Map the angle into radians; the 450 divisor follows the original source.
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
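# A minimal usage sketch for the module above (assumptions: the restored
# names `rotate` and `convert_to_2d`; the sample point, angle, scale and
# distance are arbitrary, chosen only for illustration): rotate a point
# about the z-axis, then flatten it to 2D screen coordinates.
point = (1.0, 2.0, 3.0)
rx, ry, rz = rotate(*point, axis="z", angle=45.0)
px, py = convert_to_2d(rx, ry, rz, scale=10.0, distance=10.0)
print((px, py))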
312
def solution(n: int = 1000) -> int:
    """Return the sum of all natural numbers below ``n`` that are multiples
    of 3 or 5 (Project Euler problem 1)."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
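# A quick check of `solution` as restored above: the multiples of 3 or 5
# below 10 are 3, 5, 6 and 9, which sum to 23.
print(solution(10))  # 23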
312
1
import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging __a :Dict = logging.get_logger(__name__) class _a ( snake_case_ ): """simple docstring""" def __init__( self : List[str] , UpperCAmelCase : Union[List[ControlNetModel], Tuple[ControlNetModel]] ): super().__init__() A_ = nn.ModuleList(UpperCAmelCase ) def __A ( self : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Union[torch.Tensor, float, int] , UpperCAmelCase : torch.Tensor , UpperCAmelCase : List[torch.tensor] , UpperCAmelCase : List[float] , UpperCAmelCase : Optional[torch.Tensor] = None , UpperCAmelCase : Optional[torch.Tensor] = None , UpperCAmelCase : Optional[torch.Tensor] = None , UpperCAmelCase : Optional[Dict[str, Any]] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , ): for i, (image, scale, controlnet) in enumerate(zip(UpperCAmelCase , UpperCAmelCase , self.nets ) ): A_ , A_ = controlnet( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) # merge samples if i == 0: A_ , A_ = down_samples, mid_sample else: A_ = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(UpperCAmelCase , UpperCAmelCase ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def __A ( self : Any , UpperCAmelCase : Union[str, os.PathLike] , UpperCAmelCase : bool = True , UpperCAmelCase : Callable = None , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[str] = None , ): A_ = 0 A_ = save_directory for controlnet in self.nets: controlnet.save_pretrained( UpperCAmelCase , is_main_process=UpperCAmelCase , save_function=UpperCAmelCase , safe_serialization=UpperCAmelCase , variant=UpperCAmelCase , ) idx += 1 A_ = model_path_to_save + f'''_{idx}''' @classmethod def __A ( cls : Union[str, Any] , UpperCAmelCase : Optional[Union[str, os.PathLike]] , **UpperCAmelCase : Any ): A_ = 0 A_ = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... A_ = pretrained_model_path while os.path.isdir(UpperCAmelCase ): A_ = ControlNetModel.from_pretrained(UpperCAmelCase , **UpperCAmelCase ) controlnets.append(UpperCAmelCase ) idx += 1 A_ = pretrained_model_path + f'''_{idx}''' logger.info(f'''{len(UpperCAmelCase )} controlnets loaded from {pretrained_model_path}.''' ) if len(UpperCAmelCase ) == 0: raise ValueError( f'''No ControlNets found under {os.path.dirname(UpperCAmelCase )}. Expected at least {pretrained_model_path + "_0"}.''' ) return cls(UpperCAmelCase )
312
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class _a : """simple docstring""" @property def __A ( self : Union[str, Any] ): return self.get_dummy_input() @property def __A ( self : int ): if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' ) def __A ( self : Union[str, Any] , UpperCAmelCase : List[Any]=True , UpperCAmelCase : str=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[Any]=False , ): A_ = 4 A_ = 32 A_ = (32, 32) A_ = torch.manual_seed(0 ) A_ = torch.device(UpperCAmelCase ) A_ = (batch_size, num_channels) + sizes A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ) A_ = {"hidden_states": hidden_states} if include_temb: A_ = 128 A_ = randn_tensor((batch_size, temb_channels) , generator=UpperCAmelCase , device=UpperCAmelCase ) if include_res_hidden_states_tuple: A_ = torch.manual_seed(1 ) A_ = (randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ),) if include_encoder_hidden_states: A_ = floats_tensor((batch_size, 32, 32) ).to(UpperCAmelCase ) if include_skip_sample: A_ = randn_tensor(((batch_size, 3) + sizes) , generator=UpperCAmelCase , device=UpperCAmelCase ) return dummy_input def __A ( self : Optional[int] ): A_ = { "in_channels": 32, "out_channels": 32, "temb_channels": 128, } if self.block_type == "up": A_ = 32 if self.block_type == "mid": init_dict.pop("out_channels" ) A_ = self.dummy_input return init_dict, inputs_dict def __A ( self : List[str] , UpperCAmelCase : Optional[Any] ): A_ , A_ = self.prepare_init_args_and_inputs_for_common() A_ = self.block_class(**UpperCAmelCase ) unet_block.to(UpperCAmelCase ) unet_block.eval() with torch.no_grad(): A_ = unet_block(**UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = output[0] self.assertEqual(output.shape , self.output_shape ) A_ = output[0, -1, -3:, -3:] A_ = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase ) assert torch_all_close(output_slice.flatten() , UpperCAmelCase , atol=5E-3 ) @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" ) def __A ( self : Union[str, Any] ): A_ , A_ = self.prepare_init_args_and_inputs_for_common() A_ = self.block_class(**UpperCAmelCase ) model.to(UpperCAmelCase ) model.train() A_ = model(**UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = output[0] A_ = torch.device(UpperCAmelCase ) A_ = randn_tensor(output.shape , device=UpperCAmelCase ) A_ = torch.nn.functional.mse_loss(UpperCAmelCase , UpperCAmelCase ) loss.backward()
312
1
import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def __snake_case ( __UpperCamelCase : Optional[Any] ): """simple docstring""" if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False def __snake_case ( __UpperCamelCase : str ): """simple docstring""" for char in word: A_ = ord(__UpperCamelCase ) if not _is_chinese_char(__UpperCamelCase ): return 0 return 1 def __snake_case ( __UpperCamelCase : List[str] ): """simple docstring""" A_ = set() for token in tokens: A_ = len(__UpperCamelCase ) > 1 and is_chinese(__UpperCamelCase ) if chinese_word: word_set.add(__UpperCamelCase ) A_ = list(__UpperCamelCase ) return word_list def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : set() ): """simple docstring""" if not chinese_word_set: return bert_tokens A_ = max([len(__UpperCamelCase ) for w in chinese_word_set] ) A_ = bert_tokens A_ , A_ = 0, len(__UpperCamelCase ) while start < end: A_ = True if is_chinese(bert_word[start] ): A_ = min(end - start ,__UpperCamelCase ) for i in range(__UpperCamelCase ,1 ,-1 ): A_ = "".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 ,start + i ): A_ = "##" + bert_word[j] A_ = start + i A_ = False break if single_word: start += 1 return bert_word def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : LTP ,__UpperCamelCase : BertTokenizer ): """simple docstring""" A_ = [] for i in range(0 ,len(__UpperCamelCase ) ,100 ): A_ = ltp_tokenizer.seg(lines[i : i + 100] )[0] A_ = [get_chinese_word(__UpperCamelCase ) for r in res] ltp_res.extend(__UpperCamelCase ) assert len(__UpperCamelCase ) == len(__UpperCamelCase ) A_ = [] for i in range(0 ,len(__UpperCamelCase ) ,100 ): A_ = bert_tokenizer(lines[i : i + 100] ,add_special_tokens=__UpperCamelCase ,truncation=__UpperCamelCase ,max_length=512 ) bert_res.extend(res["input_ids"] ) assert len(__UpperCamelCase ) == len(__UpperCamelCase ) A_ = [] for input_ids, chinese_word in zip(__UpperCamelCase ,__UpperCamelCase ): A_ = [] for id in input_ids: A_ = bert_tokenizer._convert_id_to_token(__UpperCamelCase ) input_tokens.append(__UpperCamelCase ) A_ = add_sub_symbol(__UpperCamelCase ,__UpperCamelCase ) A_ = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(__UpperCamelCase ): if token[:2] == "##": A_ = token[2:] # save chinese tokens' pos if len(__UpperCamelCase ) == 1 and _is_chinese_char(ord(__UpperCamelCase ) ): ref_id.append(__UpperCamelCase ) ref_ids.append(__UpperCamelCase ) assert len(__UpperCamelCase ) == len(__UpperCamelCase ) return ref_ids def __snake_case ( __UpperCamelCase : int ): """simple docstring""" with open(args.file_name ,"r" ,encoding="utf-8" ) as f: A_ = f.readlines() A_ = [line.strip() for line in data if len(__UpperCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' A_ = LTP(args.ltp ) # faster in GPU device A_ = BertTokenizer.from_pretrained(args.bert ) A_ = prepare_ref(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) with open(args.save_path ,"w" ,encoding="utf-8" ) as f: A_ = [json.dumps(__UpperCamelCase ) + "\n" for ref in ref_ids] f.writelines(__UpperCamelCase ) if __name__ == "__main__": __a :List[str] = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path' ) parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer') parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res') __a :Optional[int] = parser.parse_args() main(args)
312
import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch __a :int = True except ImportError: __a :Optional[Any] = False try: from torch.hub import _get_torch_home __a :Optional[Any] = _get_torch_home() except ImportError: __a :Tuple = os.path.expanduser( os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')) ) __a :Optional[Any] = os.path.join(torch_cache_home, 'transformers') __a :int = 'https://cdn.huggingface.co' __a :Any = 'https://s3.amazonaws.com/models.huggingface.co/bert' __a :Optional[Any] = '/'.join(str(Path(__file__).resolve()).split('/')[:-1]) __a :str = os.path.join(PATH, 'config.yaml') __a :str = os.path.join(PATH, 'attributes.txt') __a :Optional[Any] = os.path.join(PATH, 'objects.txt') __a :Optional[int] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path) __a :Dict = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE) __a :List[Any] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE) __a :List[str] = 'pytorch_model.bin' __a :Tuple = 'config.yaml' def __snake_case ( __UpperCamelCase : Optional[Any]=OBJECTS ,__UpperCamelCase : List[str]=ATTRIBUTES ): """simple docstring""" A_ = [] with open(__UpperCamelCase ) as f: for object in f.readlines(): vg_classes.append(object.split("," )[0].lower().strip() ) A_ = [] with open(__UpperCamelCase ) as f: for object in f.readlines(): vg_attrs.append(object.split("," )[0].lower().strip() ) return vg_classes, vg_attrs def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" A_ = OrderedDict() with open(__UpperCamelCase ,"rb" ) as f: A_ = pkl.load(__UpperCamelCase )["model"] for k in copy.deepcopy(list(ckp.keys() ) ): A_ = ckp.pop(__UpperCamelCase ) if isinstance(__UpperCamelCase ,np.ndarray ): A_ = torch.tensor(__UpperCamelCase ) else: assert isinstance(__UpperCamelCase ,torch.tensor ), type(__UpperCamelCase ) A_ = v return r class _a : """simple docstring""" _lowerCamelCase : Union[str, Any] = {} def __init__( self : str , UpperCAmelCase : dict , UpperCAmelCase : str = "root" , UpperCAmelCase : List[str]=0 ): A_ = name A_ = level A_ = {} for k, v in dictionary.items(): if v is None: raise ValueError() A_ = copy.deepcopy(UpperCAmelCase ) A_ = copy.deepcopy(UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = Config(UpperCAmelCase , name=UpperCAmelCase , level=level + 1 ) A_ = v setattr(self , UpperCAmelCase , UpperCAmelCase ) A_ = d def __repr__( self : Optional[Any] ): return str(list((self._pointer.keys()) ) ) def __setattr__( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Any ): A_ = val A_ = val A_ = key.split("." 
) A_ = len(UpperCAmelCase ) - 1 A_ = self._pointer if len(UpperCAmelCase ) > 1: for i, l in enumerate(UpperCAmelCase ): if hasattr(self , UpperCAmelCase ) and isinstance(getattr(self , UpperCAmelCase ) , UpperCAmelCase ): setattr(getattr(self , UpperCAmelCase ) , ".".join(levels[i:] ) , UpperCAmelCase ) if l == last_level: A_ = val else: A_ = pointer[l] def __A ( self : List[str] ): return self._pointer def __A ( self : int , UpperCAmelCase : Tuple , UpperCAmelCase : int ): with open(f'''{file_name}''' , "w" ) as stream: dump(UpperCAmelCase , UpperCAmelCase ) def __A ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Tuple ): with open(f'''{file_name}''' , "w" ) as stream: json.dump(UpperCAmelCase , UpperCAmelCase ) @staticmethod def __A ( UpperCAmelCase : Optional[int] ): with open(UpperCAmelCase ) as stream: A_ = load(UpperCAmelCase , Loader=UpperCAmelCase ) return data def __str__( self : str ): A_ = " " if self._name != "root": A_ = f'''{t * (self._level-1)}{self._name}:\n''' else: A_ = "" A_ = self._level for i, (k, v) in enumerate(self._pointer.items() ): if isinstance(UpperCAmelCase , UpperCAmelCase ): r += f'''{t * (self._level)}{v}\n''' self._level += 1 else: r += f'''{t * (self._level)}{k}: {v} ({type(UpperCAmelCase ).__name__})\n''' A_ = level return r[:-1] @classmethod def __A ( cls : Optional[Any] , UpperCAmelCase : str , **UpperCAmelCase : str ): A_ , A_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase ) return cls(UpperCAmelCase ) @classmethod def __A ( cls : int , UpperCAmelCase : str , **UpperCAmelCase : int ): A_ = kwargs.pop("cache_dir" , UpperCAmelCase ) A_ = kwargs.pop("force_download" , UpperCAmelCase ) A_ = kwargs.pop("resume_download" , UpperCAmelCase ) A_ = kwargs.pop("proxies" , UpperCAmelCase ) A_ = kwargs.pop("local_files_only" , UpperCAmelCase ) if os.path.isdir(UpperCAmelCase ): A_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) elif os.path.isfile(UpperCAmelCase ) or is_remote_url(UpperCAmelCase ): A_ = pretrained_model_name_or_path else: A_ = hf_bucket_url(UpperCAmelCase , filename=UpperCAmelCase , use_cdn=UpperCAmelCase ) try: # Load from URL or cache if already cached A_ = cached_path( UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , proxies=UpperCAmelCase , resume_download=UpperCAmelCase , local_files_only=UpperCAmelCase , ) # Load config dict if resolved_config_file is None: raise EnvironmentError A_ = Config.load_yaml(UpperCAmelCase ) except EnvironmentError: A_ = "Can't load config for" raise EnvironmentError(UpperCAmelCase ) if resolved_config_file == config_file: print("loading configuration file from path" ) else: print("loading configuration file cache" ) return Config.load_yaml(UpperCAmelCase ), kwargs def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" A_ = torch.load("dump.pt" ,map_location=in_tensor.device ) A_ = in_tensor.numpy() A_ = out_tensor.numpy()[0] print(na.shape ,na[0, 0, :5] ) print(na.shape ,na[0, 0, :5] ) assert np.allclose(__UpperCamelCase ,__UpperCamelCase ,rtol=0.01 ,atol=0.1 ), ( f'''{sum([1 for x in np.isclose(__UpperCamelCase ,__UpperCamelCase ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %''' " element-wise mismatch" ) raise Exception("tensors are all good" ) # Hugging face functions below def __snake_case ( __UpperCamelCase : Optional[int] ): """simple docstring""" A_ = urlparse(__UpperCamelCase ) return parsed.scheme in ("http", "https") def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase 
: str=True ): """simple docstring""" A_ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX A_ = "/" not in model_id if legacy_format: return f'''{endpoint}/{model_id}-{filename}''' else: return f'''{endpoint}/{model_id}/{filename}''' def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str]=None ,__UpperCamelCase : int=0 ,__UpperCamelCase : int=None ,): """simple docstring""" A_ = "python/{}".format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(__UpperCamelCase ,__UpperCamelCase ): ua += "; " + "; ".join("{}/{}".format(__UpperCamelCase ,__UpperCamelCase ) for k, v in user_agent.items() ) elif isinstance(__UpperCamelCase ,__UpperCamelCase ): ua += "; " + user_agent A_ = {"user-agent": ua} if resume_size > 0: A_ = "bytes=%d-" % (resume_size,) A_ = requests.get(__UpperCamelCase ,stream=__UpperCamelCase ,proxies=__UpperCamelCase ,headers=__UpperCamelCase ) if response.status_code == 416: # Range not satisfiable return A_ = response.headers.get("Content-Length" ) A_ = resume_size + int(__UpperCamelCase ) if content_length is not None else None A_ = tqdm( unit="B" ,unit_scale=__UpperCamelCase ,total=__UpperCamelCase ,initial=__UpperCamelCase ,desc="Downloading" ,) for chunk in response.iter_content(chunk_size=1024 ): if chunk: # filter out keep-alive new chunks progress.update(len(__UpperCamelCase ) ) temp_file.write(__UpperCamelCase ) progress.close() def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Any=None ,__UpperCamelCase : Dict=False ,__UpperCamelCase : Union[str, Any]=None ,__UpperCamelCase : Any=10 ,__UpperCamelCase : int=False ,__UpperCamelCase : Optional[Any]=None ,__UpperCamelCase : str=False ,): """simple docstring""" if cache_dir is None: A_ = TRANSFORMERS_CACHE if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = str(__UpperCamelCase ) os.makedirs(__UpperCamelCase ,exist_ok=__UpperCamelCase ) A_ = None if not local_files_only: try: A_ = requests.head(__UpperCamelCase ,allow_redirects=__UpperCamelCase ,proxies=__UpperCamelCase ,timeout=__UpperCamelCase ) if response.status_code == 200: A_ = response.headers.get("ETag" ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass A_ = url_to_filename(__UpperCamelCase ,__UpperCamelCase ) # get cache path to put the file A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(__UpperCamelCase ): return cache_path else: A_ = [ file for file in fnmatch.filter(os.listdir(__UpperCamelCase ) ,filename + ".*" ) if not file.endswith(".json" ) and not file.endswith(".lock" ) ] if len(__UpperCamelCase ) > 0: return os.path.join(__UpperCamelCase ,matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( "Cannot find the requested files in the cached path and outgoing traffic has been" " disabled. To enable model look-ups and downloads online, set 'local_files_only'" " to False." ) return None # From now on, etag is not None. if os.path.exists(__UpperCamelCase ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. 
A_ = cache_path + ".lock" with FileLock(__UpperCamelCase ): # If the download just completed while the lock was activated. if os.path.exists(__UpperCamelCase ) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: A_ = cache_path + ".incomplete" @contextmanager def _resumable_file_manager(): with open(__UpperCamelCase ,"a+b" ) as f: yield f A_ = _resumable_file_manager if os.path.exists(__UpperCamelCase ): A_ = os.stat(__UpperCamelCase ).st_size else: A_ = 0 else: A_ = partial(tempfile.NamedTemporaryFile ,dir=__UpperCamelCase ,delete=__UpperCamelCase ) A_ = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: print( "%s not found in cache or force_download set to True, downloading to %s" ,__UpperCamelCase ,temp_file.name ,) http_get( __UpperCamelCase ,__UpperCamelCase ,proxies=__UpperCamelCase ,resume_size=__UpperCamelCase ,user_agent=__UpperCamelCase ,) os.replace(temp_file.name ,__UpperCamelCase ) A_ = {"url": url, "etag": etag} A_ = cache_path + ".json" with open(__UpperCamelCase ,"w" ) as meta_file: json.dump(__UpperCamelCase ,__UpperCamelCase ) return cache_path def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : str=None ): """simple docstring""" A_ = url.encode("utf-8" ) A_ = shaaaa(__UpperCamelCase ) A_ = url_hash.hexdigest() if etag: A_ = etag.encode("utf-8" ) A_ = shaaaa(__UpperCamelCase ) filename += "." + etag_hash.hexdigest() if url.endswith(".h5" ): filename += ".h5" return filename def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Union[str, Any]=None ,__UpperCamelCase : List[Any]=False ,__UpperCamelCase : List[str]=None ,__UpperCamelCase : Any=False ,__UpperCamelCase : Optional[int]=None ,__UpperCamelCase : Optional[Any]=False ,__UpperCamelCase : Dict=False ,__UpperCamelCase : Optional[Any]=False ,): """simple docstring""" if cache_dir is None: A_ = TRANSFORMERS_CACHE if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = str(__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = str(__UpperCamelCase ) if is_remote_url(__UpperCamelCase ): # URL, so get it from the cache (downloading if necessary) A_ = get_from_cache( __UpperCamelCase ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,user_agent=__UpperCamelCase ,local_files_only=__UpperCamelCase ,) elif os.path.exists(__UpperCamelCase ): # File, and it exists. A_ = url_or_filename elif urlparse(__UpperCamelCase ).scheme == "": # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(__UpperCamelCase ) ) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(__UpperCamelCase ) ) if extract_compressed_file: if not is_zipfile(__UpperCamelCase ) and not tarfile.is_tarfile(__UpperCamelCase ): return output_path # Path where we extract compressed archives # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" A_ , A_ = os.path.split(__UpperCamelCase ) A_ = output_file.replace("." 
,"-" ) + "-extracted" A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ) if os.path.isdir(__UpperCamelCase ) and os.listdir(__UpperCamelCase ) and not force_extract: return output_path_extracted # Prevent parallel extractions A_ = output_path + ".lock" with FileLock(__UpperCamelCase ): shutil.rmtree(__UpperCamelCase ,ignore_errors=__UpperCamelCase ) os.makedirs(__UpperCamelCase ) if is_zipfile(__UpperCamelCase ): with ZipFile(__UpperCamelCase ,"r" ) as zip_file: zip_file.extractall(__UpperCamelCase ) zip_file.close() elif tarfile.is_tarfile(__UpperCamelCase ): A_ = tarfile.open(__UpperCamelCase ) tar_file.extractall(__UpperCamelCase ) tar_file.close() else: raise EnvironmentError("Archive format of {} could not be identified".format(__UpperCamelCase ) ) return output_path_extracted return output_path def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Any="," ): """simple docstring""" assert isinstance(__UpperCamelCase ,__UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): with open(__UpperCamelCase ) as f: A_ = eval(f.read() ) else: A_ = requests.get(__UpperCamelCase ) try: A_ = requests.json() except Exception: A_ = req.content.decode() assert data is not None, "could not connect" try: A_ = eval(__UpperCamelCase ) except Exception: A_ = data.split("\n" ) req.close() return data def __snake_case ( __UpperCamelCase : int ): """simple docstring""" A_ = requests.get(__UpperCamelCase ) A_ = np.array(Image.open(BytesIO(response.content ) ) ) return img def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" A_ = url.split("/" )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(__UpperCamelCase ) with open(__UpperCamelCase ,"rb" ) as stream: A_ = pkl.load(__UpperCamelCase ) A_ = weights.pop("model" ) A_ = {} for k, v in model.items(): A_ = torch.from_numpy(__UpperCamelCase ) if "running_var" in k: A_ = torch.tensor([0] ) A_ = k.replace("running_var" ,"num_batches_tracked" ) A_ = zero return new def __snake_case ( ): """simple docstring""" print(f'''{os.path.abspath(os.path.join(__UpperCamelCase ,os.pardir ) )}/demo.ipynb''' ) def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[int]="RGB" ): """simple docstring""" assert isinstance(__UpperCamelCase ,__UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): A_ = cva.imread(__UpperCamelCase ) else: A_ = get_image_from_url(__UpperCamelCase ) assert img is not None, f'''could not connect to: {im}''' A_ = cva.cvtColor(__UpperCamelCase ,cva.COLOR_BGR2RGB ) if input_format == "RGB": A_ = img[:, :, ::-1] return img def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[str]=1 ): """simple docstring""" return (images[i : i + batch] for i in range(0 ,len(__UpperCamelCase ) ,__UpperCamelCase ))
312
1
import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _a ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : List[str] = IFInpaintingPipeline _lowerCamelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'} _lowerCamelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS _lowerCamelCase : str = PipelineTesterMixin.required_optional_params - {'latents'} def __A ( self : int ): return self._get_dummy_components() def __A ( self : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any]=0 ): if str(UpperCAmelCase ).startswith("mps" ): A_ = torch.manual_seed(UpperCAmelCase ) else: A_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase ) A_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase ) A_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase ) A_ = { "prompt": "A painting of a squirrel eating a burger", "image": image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __A ( self : Optional[int] ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __A ( self : Optional[int] ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def __A ( self : Tuple ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __A ( self : Any ): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __A ( self : List[Any] ): self._test_save_load_local() def __A ( self : str ): self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
312
from __future__ import annotations


def min_path_sum(matrix: list[list[int]]) -> int:
    """Return the minimal path sum from the top-left to the bottom-right
    corner of ``matrix``, moving only right or down.  The matrix is
    updated in place."""
    # Preprocess the first row: each cell accumulates the cost to its left.
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # Preprocess the first column: each cell accumulates the cost above it.
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # Every remaining cell takes the cheaper of the two incoming paths.
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
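# A minimal usage sketch for `min_path_sum` above; the grid is a made-up
# example, and note that the function mutates its argument in place.
grid = [
    [1, 3, 1],
    [1, 5, 1],
    [4, 2, 1],
]
print(min_path_sum(grid))  # 7, via the path 1 -> 3 -> 1 -> 1 -> 1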
312
1
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class _a : """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase : Tuple , ): A_ = parent A_ = 13 A_ = 7 A_ = True A_ = True A_ = True A_ = True A_ = True A_ = False A_ = False A_ = False A_ = 2 A_ = 99 A_ = 0 A_ = 32 A_ = 2 A_ = 4 A_ = 0.1 A_ = 0.1 A_ = 512 A_ = 16 A_ = 2 A_ = 0.02 A_ = 3 A_ = 4 A_ = "last" A_ = True A_ = None A_ = 0 def __A ( self : Tuple ): A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) A_ = None if self.use_input_lengths: A_ = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length A_ = None if self.use_token_type_ids: A_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) A_ = None A_ = None A_ = None if self.use_labels: A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) A_ = ids_tensor([self.batch_size] , self.num_choices ) A_ = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __A ( self : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Dict , ): A_ = TFFlaubertModel(config=UpperCAmelCase ) A_ = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} A_ = model(UpperCAmelCase ) A_ = [input_ids, input_mask] A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self : int , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : int , ): A_ = TFFlaubertWithLMHeadModel(UpperCAmelCase ) A_ = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} A_ = 
model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , ): A_ = TFFlaubertForQuestionAnsweringSimple(UpperCAmelCase ) A_ = {"input_ids": input_ids, "lengths": input_lengths} A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : str , UpperCAmelCase : Tuple , ): A_ = TFFlaubertForSequenceClassification(UpperCAmelCase ) A_ = {"input_ids": input_ids, "lengths": input_lengths} A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __A ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , ): A_ = self.num_labels A_ = TFFlaubertForTokenClassification(config=UpperCAmelCase ) A_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A ( self : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , ): A_ = self.num_choices A_ = TFFlaubertForMultipleChoice(config=UpperCAmelCase ) A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) A_ = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __A ( self : Optional[int] ): A_ = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) = config_and_inputs A_ = { "input_ids": input_ids, "token_type_ids": token_type_ids, "langs": token_type_ids, "lengths": input_lengths, } return config, inputs_dict @require_tf class _a ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : int = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) _lowerCamelCase : int = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _lowerCamelCase : int = ( { 
'feature-extraction': TFFlaubertModel, 'fill-mask': TFFlaubertWithLMHeadModel, 'question-answering': TFFlaubertForQuestionAnsweringSimple, 'text-classification': TFFlaubertForSequenceClassification, 'token-classification': TFFlaubertForTokenClassification, 'zero-shot': TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) _lowerCamelCase : List[Any] = False _lowerCamelCase : Dict = False def __A ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : Tuple ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __A ( self : Union[str, Any] ): A_ = TFFlaubertModelTester(self ) A_ = ConfigTester(self , config_class=UpperCAmelCase , emb_dim=37 ) def __A ( self : List[str] ): self.config_tester.run_common_tests() def __A ( self : int ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*UpperCAmelCase ) def __A ( self : Dict ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*UpperCAmelCase ) def __A ( self : Dict ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*UpperCAmelCase ) def __A ( self : Optional[Any] ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*UpperCAmelCase ) def __A ( self : Tuple ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*UpperCAmelCase ) def __A ( self : List[Any] ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*UpperCAmelCase ) @slow def __A ( self : str ): for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ = TFFlaubertModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) @require_tf @require_sentencepiece @require_tokenizers class _a ( unittest.TestCase ): """simple docstring""" @slow def __A ( self : Any ): A_ = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" ) A_ = tf.convert_to_tensor( [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" A_ = model(UpperCAmelCase )[0] A_ = tf.TensorShape((1, 8, 512) ) self.assertEqual(output.shape , UpperCAmelCase ) # compare the actual values for a slice. A_ = tf.convert_to_tensor( [ [ [-1.8_768_773, -1.566_555, 0.27_072_418], [-1.6_920_038, -0.5_873_505, 1.9_329_599], [-2.9_563_985, -1.6_993_835, 1.7_972_052], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
312
from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging __a :int = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class _a ( snake_case_ ): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : int = 101 ): A_ = length def __len__( self : int ): return self.length def __getitem__( self : Optional[int] , UpperCAmelCase : Optional[int] ): return i class _a : """simple docstring""" def __call__( self : Any , UpperCAmelCase : Optional[Any] ): return {"input_ids": torch.tensor(UpperCAmelCase ), "labels": torch.tensor(UpperCAmelCase )} class _a ( nn.Module ): """simple docstring""" def __init__( self : int ): super().__init__() # Add some (unused) params otherwise DDP will complain. A_ = nn.Linear(120 , 80 ) def __A ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Tuple=None ): if labels is not None: return torch.tensor(0.0 , device=input_ids.device ), input_ids else: return input_ids class _a ( snake_case_ ): """simple docstring""" @require_torch_neuroncore def __A ( self : List[str] ): A_ = f'''--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py '''.split() A_ = self.get_auto_remove_tmp_dir() A_ = f'''--output_dir {output_dir}'''.split() A_ = ["torchrun"] + distributed_args + args execute_subprocess_async(UpperCAmelCase , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call class _a ( snake_case_ ): """simple docstring""" @require_torch_multi_gpu def __A ( self : List[str] ): A_ = f'''--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py '''.split() A_ = self.get_auto_remove_tmp_dir() A_ = f'''--output_dir {output_dir}'''.split() A_ = ["torchrun"] + distributed_args + args execute_subprocess_async(UpperCAmelCase , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py __a :Union[str, Any] = HfArgumentParser((TrainingArguments,)) __a :Tuple = parser.parse_args_into_dataclasses()[0] logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " F"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}" ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. 
(this is crucial for prediction for instance) for dataset_length in [101, 40, 7]: __a :int = DummyDataset(dataset_length) def __snake_case ( __UpperCamelCase : EvalPrediction ): """simple docstring""" A_ = list(range(len(__UpperCamelCase ) ) ) A_ = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( "Predictions and/or labels do not match expected results:\n - predictions: " f'''{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}''' ) return {"success": success} __a :str = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) __a :str = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) __a :str = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) __a :Optional[int] = 2 __a :List[Any] = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) __a :str = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) __a :Union[str, Any] = None
312
1
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __a :List[str] = logging.get_logger(__name__) __a :List[Any] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k', 'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v', 'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q', 'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u', 'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v', 'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out', 'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos', 'self_attn.rotary_emb': 'encoder.embed_positions', 'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm', 'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1', 'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2', 'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv', 'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm', 'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm', 'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense', 'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense', 'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm', 'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense', 'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense', 'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } __a :Any = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Tuple ,__UpperCamelCase : int ,__UpperCamelCase : List[str] ): """simple docstring""" for attribute in key.split("." ): A_ = getattr(__UpperCamelCase ,__UpperCamelCase ) if weight_type is not None: A_ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape else: A_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": A_ = value elif weight_type == "weight_g": A_ = value elif weight_type == "weight_v": A_ = value elif weight_type == "bias": A_ = value elif weight_type == "running_mean": A_ = value elif weight_type == "running_var": A_ = value elif weight_type == "num_batches_tracked": A_ = value elif weight_type == "inv_freq": A_ = value else: A_ = value logger.info(f'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : str ,__UpperCamelCase : List[str] ): """simple docstring""" A_ = [] A_ = fairseq_model.state_dict() A_ = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): A_ = False if "conv_layers" in name: load_conv_layer( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,) A_ = True else: for key, mapped_key in MAPPING.items(): A_ = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: A_ = True if "*" in mapped_key: A_ = name.split(__UpperCamelCase )[0].split("." )[-2] A_ = mapped_key.replace("*" ,__UpperCamelCase ) if "pos_bias_u" in name: A_ = None elif "pos_bias_v" in name: A_ = None elif "weight_g" in name: A_ = "weight_g" elif "weight_v" in name: A_ = "weight_v" elif "bias" in name: A_ = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj A_ = "weight" elif "running_mean" in name: A_ = "running_mean" elif "inv_freq" in name: A_ = "inv_freq" elif "running_var" in name: A_ = "running_var" elif "num_batches_tracked" in name: A_ = "num_batches_tracked" else: A_ = None set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) continue if not is_used: unused_weights.append(__UpperCamelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : int ,__UpperCamelCase : str ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : str ): """simple docstring""" A_ = full_name.split("conv_layers." )[-1] A_ = name.split("." 
) A_ = int(items[0] ) A_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__UpperCamelCase ) @torch.no_grad() def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Dict ,__UpperCamelCase : Any=None ,__UpperCamelCase : Optional[Any]=None ,__UpperCamelCase : List[Any]=True ): """simple docstring""" if config_path is not None: A_ = WavaVecaConformerConfig.from_pretrained(__UpperCamelCase ,hidden_act="swish" ) else: A_ = WavaVecaConformerConfig() if "rope" in checkpoint_path: A_ = "rotary" if is_finetuned: if dict_path: A_ = Dictionary.load(__UpperCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq A_ = target_dict.pad_index A_ = target_dict.bos_index A_ = target_dict.eos_index A_ = len(target_dict.symbols ) A_ = os.path.join(__UpperCamelCase ,"vocab.json" ) if not os.path.isdir(__UpperCamelCase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__UpperCamelCase ) ) return os.makedirs(__UpperCamelCase ,exist_ok=__UpperCamelCase ) A_ = target_dict.indices # fairseq has the <pad> and <s> switched A_ = 0 A_ = 1 with open(__UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_handle: json.dump(__UpperCamelCase ,__UpperCamelCase ) A_ = WavaVecaCTCTokenizer( __UpperCamelCase ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="|" ,do_lower_case=__UpperCamelCase ,) A_ = True if config.feat_extract_norm == "layer" else False A_ = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=1_6000 ,padding_value=0 ,do_normalize=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,) A_ = WavaVecaProcessor(feature_extractor=__UpperCamelCase ,tokenizer=__UpperCamelCase ) processor.save_pretrained(__UpperCamelCase ) A_ = WavaVecaConformerForCTC(__UpperCamelCase ) else: A_ = WavaVecaConformerForPreTraining(__UpperCamelCase ) if is_finetuned: A_ , A_ , A_ = 
fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: A_ = argparse.Namespace(task="audio_pretraining" ) A_ = fairseq.tasks.setup_task(__UpperCamelCase ) A_ , A_ , A_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=__UpperCamelCase ) A_ = model[0].eval() recursively_load_weights(__UpperCamelCase ,__UpperCamelCase ,not is_finetuned ) hf_wavavec.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :Tuple = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) __a :List[str] = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
312
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __a :Any = logging.get_logger(__name__) def __snake_case ( __UpperCamelCase : Optional[int] ): """simple docstring""" A_ = "huggingface/label-files" A_ = "imagenet-1k-id2label.json" A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) ) A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} A_ = {v: k for k, v in idalabel.items()} A_ = "std_conv" if "bit" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" A_ = BitConfig( conv_layer=__UpperCamelCase ,num_labels=1000 ,idalabel=__UpperCamelCase ,labelaid=__UpperCamelCase ,) return config def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" if "stem.conv" in name: A_ = name.replace("stem.conv" ,"bit.embedder.convolution" ) if "blocks" in name: A_ = name.replace("blocks" ,"layers" ) if "head.fc" in name: A_ = name.replace("head.fc" ,"classifier.1" ) if name.startswith("norm" ): A_ = "bit." + name if "bit" not in name and "classifier" not in name: A_ = "bit.encoder." + name return name def __snake_case ( ): """simple docstring""" A_ = "http://images.cocodataset.org/val2017/000000039769.jpg" A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Tuple=False ): """simple docstring""" A_ = get_config(__UpperCamelCase ) # load original model from timm A_ = create_model(__UpperCamelCase ,pretrained=__UpperCamelCase ) timm_model.eval() # load state_dict of original model A_ = timm_model.state_dict() for key in state_dict.copy().keys(): A_ = state_dict.pop(__UpperCamelCase ) A_ = val.squeeze() if "head" in key else val # load HuggingFace model A_ = BitForImageClassification(__UpperCamelCase ) model.eval() model.load_state_dict(__UpperCamelCase ) # create image processor A_ = create_transform(**resolve_data_config({} ,model=__UpperCamelCase ) ) A_ = transform.transforms A_ = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } A_ = BitImageProcessor( do_resize=__UpperCamelCase ,size={"shortest_edge": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=__UpperCamelCase ,crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} ,do_normalize=__UpperCamelCase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,) A_ = prepare_img() A_ = transform(__UpperCamelCase ).unsqueeze(0 ) A_ = processor(__UpperCamelCase ,return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(__UpperCamelCase ,__UpperCamelCase ) # verify logits with torch.no_grad(): A_ = model(__UpperCamelCase ) A_ = outputs.logits print("Logits:" ,logits[0, :3] ) print("Predicted class:" 
,model.config.idalabel[logits.argmax(-1 ).item()] ) A_ = timm_model(__UpperCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__UpperCamelCase ,outputs.logits ,atol=1E-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(__UpperCamelCase ) processor.save_pretrained(__UpperCamelCase ) if push_to_hub: print(f'''Pushing model {model_name} and processor to the hub''' ) model.push_to_hub(f'''ybelkada/{model_name}''' ) processor.push_to_hub(f'''ybelkada/{model_name}''' ) if __name__ == "__main__": __a :List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='resnetv2_50x1_bitm', type=str, help='Name of the BiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model to the hub.', ) __a :str = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
312
1
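# Illustrative aside (not a dataset row): the BiT converter above maps timm
# checkpoint keys onto the HuggingFace layout with plain string substitutions.
# A de-obfuscated, runnable sketch of that rename rule (names mirror the row):
def rename_bit_key(name: str) -> str:
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name

assert rename_bit_key("stem.conv.weight") == "bit.embedder.convolution.weight"
assert rename_bit_key("head.fc.bias") == "classifier.1.bias"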
from bisect import bisect from itertools import accumulate def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Any ,__UpperCamelCase : int ,__UpperCamelCase : Union[str, Any] ): """simple docstring""" A_ = sorted(zip(__UpperCamelCase ,__UpperCamelCase ) ,key=lambda __UpperCamelCase : x[0] / x[1] ,reverse=__UpperCamelCase ) A_ , A_ = [i[0] for i in r], [i[1] for i in r] A_ = list(accumulate(__UpperCamelCase ) ) A_ = bisect(__UpperCamelCase ,__UpperCamelCase ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
312
import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger __a :Dict = get_logger(__name__) __a :Union[str, Any] = Path(__file__).parent / 'model_card_template.md' __a :Tuple = uuida().hex __a :List[Any] = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES __a :Union[str, Any] = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES __a :Tuple = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/' def __snake_case ( __UpperCamelCase : Union[Dict, str, None] = None ): """simple docstring""" A_ = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}''' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'''; torch/{_torch_version}''' if is_flax_available(): ua += f'''; jax/{_jax_version}''' ua += f'''; flax/{_flax_version}''' if is_onnx_available(): ua += f'''; onnxruntime/{_onnxruntime_version}''' # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" ,"" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(__UpperCamelCase ,__UpperCamelCase ): ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items() ) elif isinstance(__UpperCamelCase ,__UpperCamelCase ): ua += "; " + user_agent return ua def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[str] = None ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if token is None: A_ = HfFolder.get_token() if organization is None: A_ = whoami(__UpperCamelCase )["name"] return f'''{username}/{model_id}''' else: return f'''{organization}/{model_id}''' def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Union[str, Any] ): """simple docstring""" if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." 
) if hasattr(__UpperCamelCase ,"local_rank" ) and args.local_rank not in [-1, 0]: return A_ = args.hub_token if hasattr(__UpperCamelCase ,"hub_token" ) else None A_ = get_full_repo_name(__UpperCamelCase ,token=__UpperCamelCase ) A_ = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" ,license="apache-2.0" ,library_name="diffusers" ,tags=[] ,datasets=args.dataset_name ,metrics=[] ,) ,template_path=__UpperCamelCase ,model_name=__UpperCamelCase ,repo_name=__UpperCamelCase ,dataset_name=args.dataset_name if hasattr(__UpperCamelCase ,"dataset_name" ) else None ,learning_rate=args.learning_rate ,train_batch_size=args.train_batch_size ,eval_batch_size=args.eval_batch_size ,gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(__UpperCamelCase ,"gradient_accumulation_steps" ) else None ) ,adam_betaa=args.adam_betaa if hasattr(__UpperCamelCase ,"adam_beta1" ) else None ,adam_betaa=args.adam_betaa if hasattr(__UpperCamelCase ,"adam_beta2" ) else None ,adam_weight_decay=args.adam_weight_decay if hasattr(__UpperCamelCase ,"adam_weight_decay" ) else None ,adam_epsilon=args.adam_epsilon if hasattr(__UpperCamelCase ,"adam_epsilon" ) else None ,lr_scheduler=args.lr_scheduler if hasattr(__UpperCamelCase ,"lr_scheduler" ) else None ,lr_warmup_steps=args.lr_warmup_steps if hasattr(__UpperCamelCase ,"lr_warmup_steps" ) else None ,ema_inv_gamma=args.ema_inv_gamma if hasattr(__UpperCamelCase ,"ema_inv_gamma" ) else None ,ema_power=args.ema_power if hasattr(__UpperCamelCase ,"ema_power" ) else None ,ema_max_decay=args.ema_max_decay if hasattr(__UpperCamelCase ,"ema_max_decay" ) else None ,mixed_precision=args.mixed_precision ,) A_ = os.path.join(args.output_dir ,"README.md" ) model_card.save(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Optional[str] ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if resolved_file is None or commit_hash is not None: return commit_hash A_ = str(Path(__UpperCamelCase ).as_posix() ) A_ = re.search(R"snapshots/([^/]+)/" ,__UpperCamelCase ) if search is None: return None A_ = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(__UpperCamelCase ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. __a :str = os.path.expanduser( os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface')) ) __a :List[Any] = os.path.join(hf_cache_home, 'diffusers') def __snake_case ( __UpperCamelCase : Optional[str] = None ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if new_cache_dir is None: A_ = DIFFUSERS_CACHE if old_cache_dir is None: A_ = old_diffusers_cache A_ = Path(__UpperCamelCase ).expanduser() A_ = Path(__UpperCamelCase ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): A_ = new_cache_dir / old_blob_path.relative_to(__UpperCamelCase ) new_blob_path.parent.mkdir(parents=__UpperCamelCase ,exist_ok=__UpperCamelCase ) os.replace(__UpperCamelCase ,__UpperCamelCase ) try: os.symlink(__UpperCamelCase ,__UpperCamelCase ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. 
If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). __a :Dict = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt') if not os.path.isfile(cache_version_file): __a :Optional[int] = 0 else: with open(cache_version_file) as f: try: __a :Dict = int(f.read()) except ValueError: __a :str = 0 if cache_version < 1: __a :Optional[Any] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( 'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your ' 'existing cached models. This is a one-time operation, you can interrupt it or run it ' 'later by calling `diffusers.utils.hub_utils.move_cache()`.' ) try: move_cache() except Exception as e: __a :Optional[Any] = '\n'.join(traceback.format_tb(e.__traceback__)) logger.error( F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease " 'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole ' 'message and we will do our best to help.' ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, 'w') as f: f.write('1') except Exception: logger.warning( F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure " 'the directory exists and can be written to.' ) def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if variant is not None: A_ = weights_name.split("." ) A_ = splits[:-1] + [variant] + splits[-1:] A_ = ".".join(__UpperCamelCase ) return weights_name def __snake_case ( __UpperCamelCase : Optional[Any] ,*, __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Any ,__UpperCamelCase : Tuple ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : str ,__UpperCamelCase : int ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : int ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Optional[int]=None ,): """simple docstring""" A_ = str(__UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): return pretrained_model_name_or_path elif os.path.isdir(__UpperCamelCase ): if os.path.isfile(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ): # Load from a PyTorch checkpoint A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ): A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) return model_file else: raise EnvironmentError( f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' ) else: # 1. 
First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(__UpperCamelCase ).base_version ) >= version.parse("0.20.0" ) ): try: A_ = hf_hub_download( __UpperCamelCase ,filename=_add_variant(__UpperCamelCase ,__UpperCamelCase ) ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,local_files_only=__UpperCamelCase ,use_auth_token=__UpperCamelCase ,user_agent=__UpperCamelCase ,subfolder=__UpperCamelCase ,revision=revision or commit_hash ,) warnings.warn( f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' ,__UpperCamelCase ,) return model_file except: # noqa: E722 warnings.warn( f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__UpperCamelCase ,__UpperCamelCase )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(__UpperCamelCase ,__UpperCamelCase )}\' so that the correct variant file can be added.''' ,__UpperCamelCase ,) try: # 2. Load model file as usual A_ = hf_hub_download( __UpperCamelCase ,filename=__UpperCamelCase ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,local_files_only=__UpperCamelCase ,use_auth_token=__UpperCamelCase ,user_agent=__UpperCamelCase ,subfolder=__UpperCamelCase ,revision=revision or commit_hash ,) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ''' "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ''' "this model name. 
Check the model page at " f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' ) except EntryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' ) except HTTPError as err: raise EnvironmentError( f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' ) except ValueError: raise EnvironmentError( f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it''' f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a''' f''' directory containing a file named {weights_name} or''' " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ''' "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ''' f'''containing a file named {weights_name}''' )
312
1
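# Illustrative aside: the fractional-knapsack helper above sorts items by
# value density, prefix-sums the weights, and bisects for the cutoff item.
# A readable, runnable rewrite of the same algorithm with illustrative names:
from bisect import bisect
from itertools import accumulate

def frac_knapsack(vl, wt, w, n):
    by_density = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in by_density], [i[1] for i in by_density]
    acc = list(accumulate(wt))
    k = bisect(acc, w)              # first item that no longer fits whole
    if k == 0:
        return 0
    if k == n:
        return sum(vl)              # everything fits
    return sum(vl[:k]) + (w - acc[k - 1]) * vl[k] / wt[k]  # fractional take

assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0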
__a :Any = [ 'Audio', 'Array2D', 'Array3D', 'Array4D', 'Array5D', 'ClassLabel', 'Features', 'Sequence', 'Value', 'Image', 'Translation', 'TranslationVariableLanguages', ] from .audio import Audio from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value from .image import Image from .translation import Translation, TranslationVariableLanguages
312
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __a :Any = { 'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'], 'processing_mgp_str': ['MgpstrProcessor'], 'tokenization_mgp_str': ['MgpstrTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Optional[Any] = [ 'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST', 'MgpstrModel', 'MgpstrPreTrainedModel', 'MgpstrForSceneTextRecognition', ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys __a :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
312
1
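# Illustrative aside: the mgp-str __init__ above registers an import map and
# defers the heavy torch imports until first attribute access. A minimal
# stdlib-only sketch of that lazy-module pattern (this is NOT transformers'
# actual _LazyModule implementation, just the idea behind it):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # invert {submodule: [attr, ...]} into {attr: submodule}
        self._attr_to_submodule = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)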
import os from typing import Dict, List, Tuple, TypeVar, Union __a :Any = TypeVar('T') __a :Union[str, Any] = Union[List[T], Tuple[T, ...]] __a :List[str] = Union[T, List[T], Dict[str, T]] __a :Any = Union[str, bytes, os.PathLike]
312
import functools from typing import Any def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : list[str] ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or len(__UpperCamelCase ) == 0: raise ValueError("the string should be not empty string" ) if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not all( isinstance(__UpperCamelCase ,__UpperCamelCase ) and len(__UpperCamelCase ) > 0 for item in words ): raise ValueError("the words should be a list of non-empty strings" ) # Build trie A_ = {} A_ = "WORD_KEEPER" for word in words: A_ = trie for c in word: if c not in trie_node: A_ = {} A_ = trie_node[c] A_ = True A_ = len(__UpperCamelCase ) # Dynamic programming method @functools.cache def is_breakable(__UpperCamelCase : int ) -> bool: if index == len_string: return True A_ = trie for i in range(__UpperCamelCase ,__UpperCamelCase ): A_ = trie_node.get(string[i] ,__UpperCamelCase ) if trie_node is None: return False if trie_node.get(__UpperCamelCase ,__UpperCamelCase ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
312
1
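# Illustrative aside: the word-break routine above builds a character trie and
# memoises "is the suffix starting at index i segmentable?". A de-obfuscated,
# runnable version of the same algorithm:
import functools

def word_break(string: str, words: list) -> bool:
    trie: dict = {}
    for word in words:
        node = trie
        for c in word:
            node = node.setdefault(c, {})
        node["WORD_KEEPER"] = True

    @functools.cache
    def breakable(index: int) -> bool:
        if index == len(string):
            return True
        node = trie
        for i in range(index, len(string)):
            node = node.get(string[i])
            if node is None:
                return False
            if node.get("WORD_KEEPER") and breakable(i + 1):
                return True
        return False

    return breakable(0)

assert word_break("applepenapple", ["apple", "pen"]) is True
assert word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False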
import functools from typing import Any def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : list[str] ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or len(__UpperCamelCase ) == 0: raise ValueError("the string should be not empty string" ) if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not all( isinstance(__UpperCamelCase ,__UpperCamelCase ) and len(__UpperCamelCase ) > 0 for item in words ): raise ValueError("the words should be a list of non-empty strings" ) # Build trie A_ = {} A_ = "WORD_KEEPER" for word in words: A_ = trie for c in word: if c not in trie_node: A_ = {} A_ = trie_node[c] A_ = True A_ = len(__UpperCamelCase ) # Dynamic programming method @functools.cache def is_breakable(__UpperCamelCase : int ) -> bool: if index == len_string: return True A_ = trie for i in range(__UpperCamelCase ,__UpperCamelCase ): A_ = trie_node.get(string[i] ,__UpperCamelCase ) if trie_node is None: return False if trie_node.get(__UpperCamelCase ,__UpperCamelCase ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
312
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer __a :List[str] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __a :Union[str, Any] = { 'vocab_file': { 'google/electra-small-generator': ( 'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt' ), 'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt', 'google/electra-large-generator': ( 'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt' ), 'google/electra-small-discriminator': ( 'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt' ), 'google/electra-base-discriminator': ( 'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt' ), 'google/electra-large-discriminator': ( 'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'google/electra-small-generator': ( 'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json' ), 'google/electra-base-generator': ( 'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json' ), 'google/electra-large-generator': ( 'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json' ), 'google/electra-small-discriminator': ( 'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json' ), 'google/electra-base-discriminator': ( 'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json' ), 'google/electra-large-discriminator': ( 'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json' ), }, } __a :Optional[int] = { 'google/electra-small-generator': 512, 'google/electra-base-generator': 512, 'google/electra-large-generator': 512, 'google/electra-small-discriminator': 512, 'google/electra-base-discriminator': 512, 'google/electra-large-discriminator': 512, } __a :str = { 'google/electra-small-generator': {'do_lower_case': True}, 'google/electra-base-generator': {'do_lower_case': True}, 'google/electra-large-generator': {'do_lower_case': True}, 'google/electra-small-discriminator': {'do_lower_case': True}, 'google/electra-base-discriminator': {'do_lower_case': True}, 'google/electra-large-discriminator': {'do_lower_case': True}, } class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Tuple = VOCAB_FILES_NAMES _lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION _lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase : int = ElectraTokenizer def __init__( self : Tuple , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Any=True , UpperCAmelCase : Any="[UNK]" , UpperCAmelCase : Union[str, Any]="[SEP]" , UpperCAmelCase : List[Any]="[PAD]" , UpperCAmelCase : Union[str, Any]="[CLS]" , UpperCAmelCase : List[Any]="[MASK]" , UpperCAmelCase : List[str]=True , UpperCAmelCase : Any=None , **UpperCAmelCase : Union[str, Any] , ): super().__init__( UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , ) A_ = 
json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars ): A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) ) A_ = do_lower_case A_ = strip_accents A_ = tokenize_chinese_chars A_ = normalizer_class(**UpperCAmelCase ) A_ = do_lower_case def __A ( self : int , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any]=None ): A_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __A ( self : Union[str, Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ): A_ = [self.sep_token_id] A_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ): A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase ) return tuple(UpperCAmelCase )
312
1
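# Illustrative aside: the Electra fast tokenizer above rebuilds its backend
# normalizer whenever the serialized lowercase / strip-accents / CJK settings
# disagree with the constructor arguments. A sketch of that check, assuming
# the `tokenizers` library (the function name is illustrative):
import json
from tokenizers import normalizers

def sync_normalizer(backend_tokenizer, do_lower_case, strip_accents, tokenize_chinese_chars):
    state = json.loads(backend_tokenizer.normalizer.__getstate__())
    if (
        state.get("lowercase") != do_lower_case
        or state.get("strip_accents") != strip_accents
        or state.get("handle_chinese_chars") != tokenize_chinese_chars
    ):
        normalizer_class = getattr(normalizers, state.pop("type"))
        state["lowercase"] = do_lower_case
        state["strip_accents"] = strip_accents
        state["handle_chinese_chars"] = tokenize_chinese_chars
        backend_tokenizer.normalizer = normalizer_class(**state)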
from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class _a ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , UpperCAmelCase : int = 16 , UpperCAmelCase : int = 88 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : int = 32 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : str = "geglu" , UpperCAmelCase : Optional[int] = None , ): super().__init__() A_ = nn.ModuleList( [ TransformeraDModel( num_attention_heads=UpperCAmelCase , attention_head_dim=UpperCAmelCase , in_channels=UpperCAmelCase , num_layers=UpperCAmelCase , dropout=UpperCAmelCase , norm_num_groups=UpperCAmelCase , cross_attention_dim=UpperCAmelCase , attention_bias=UpperCAmelCase , sample_size=UpperCAmelCase , num_vector_embeds=UpperCAmelCase , activation_fn=UpperCAmelCase , num_embeds_ada_norm=UpperCAmelCase , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference A_ = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` A_ = [77, 257] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` A_ = [1, 0] def __A ( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : Tuple=None , UpperCAmelCase : bool = True , ): A_ = hidden_states A_ = [] A_ = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens A_ = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] A_ = self.transformer_index_for_condition[i] A_ = self.transformers[transformer_index]( UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , timestep=UpperCAmelCase , cross_attention_kwargs=UpperCAmelCase , return_dict=UpperCAmelCase , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] A_ = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) A_ = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=UpperCAmelCase )
312
# flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter __a :Optional[Any] = logging.get_logger(__name__) __a :Dict[Optional[str], Type[Formatter]] = {} __a :Dict[Optional[str], str] = {} __a :Dict[Optional[str], Exception] = {} def __snake_case ( __UpperCamelCase : type ,__UpperCamelCase : Optional[str] ,__UpperCamelCase : Optional[List[str]] = None ,): """simple docstring""" A_ = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' ) A_ = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' ) A_ = format_type def __snake_case ( __UpperCamelCase : Exception ,__UpperCamelCase : Optional[str] ,__UpperCamelCase : Optional[List[str]] = None ): """simple docstring""" A_ = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): A_ = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['python']) _register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow']) _register_formatter(NumpyFormatter, 'numpy', aliases=['np']) _register_formatter(PandasFormatter, 'pandas', aliases=['pd']) _register_formatter(CustomFormatter, 'custom') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch']) else: __a :List[Any] = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.') _register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, 'tensorflow', aliases=['tf']) else: __a :List[str] = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.') _register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, 'jax', aliases=[]) else: __a :Tuple = ValueError('JAX needs to be installed to be able to return JAX arrays.') _register_unavailable_formatter(_jax_error, 'jax', aliases=[]) def __snake_case ( __UpperCamelCase : Optional[str] ): """simple docstring""" if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def __snake_case ( __UpperCamelCase : Optional[str] ,**__UpperCamelCase : List[Any] ): """simple docstring""" A_ = get_format_type_from_alias(__UpperCamelCase ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**__UpperCamelCase ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
312
1
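# Illustrative aside: the formatter registry above keeps three maps -- format
# type to class, alias to canonical name, and alias to the error raised when a
# backend is missing. A minimal generic sketch of the same pattern:
_FORMATTERS: dict = {}
_ALIASES: dict = {}
_UNAVAILABLE: dict = {}

def register_formatter(cls, format_type, aliases=()):
    _FORMATTERS[format_type] = cls
    for alias in set(list(aliases) + [format_type]):
        _ALIASES[alias] = format_type

def register_unavailable(error, format_type, aliases=()):
    for alias in set(list(aliases) + [format_type]):
        _UNAVAILABLE[alias] = error

def get_formatter(format_type, **kwargs):
    format_type = _ALIASES.get(format_type, format_type)
    if format_type in _FORMATTERS:
        return _FORMATTERS[format_type](**kwargs)
    if format_type in _UNAVAILABLE:
        raise _UNAVAILABLE[format_type]
    raise ValueError(f"unknown format type {format_type!r}")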
import torch from transformers import AutoModel class _a ( torch.nn.Module ): """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase : List[str]="sayef/fsner-bert-base-uncased" ): super(UpperCAmelCase , self ).__init__() A_ = AutoModel.from_pretrained(UpperCAmelCase , return_dict=UpperCAmelCase ) A_ = torch.nn.CosineSimilarity(3 , 1E-08 ) A_ = torch.nn.Softmax(dim=1 ) def __A ( self : Optional[Any] , **UpperCAmelCase : Any ): return self.bert(**UpperCAmelCase ).last_hidden_state def __A ( self : List[Any] , UpperCAmelCase : Union[str, Any] ): return token_embeddings.sum(2 , keepdim=UpperCAmelCase ) def __A ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Any=1 ): return self.softmax(T * self.cos(UpperCAmelCase , UpperCAmelCase ) ) def __A ( self : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Any ): A_ = W_supports["sizes"].tolist() A_ = W_supports["start_token_id"].item() A_ = W_supports["end_token_id"].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] A_ = self.BERT(**UpperCAmelCase ) A_ = self.BERT(**UpperCAmelCase ) A_ = None A_ = None A_ = W_supports["input_ids"] == start_token_id A_ = W_supports["input_ids"] == end_token_id for i, size in enumerate(UpperCAmelCase ): if i == 0: A_ = 0 else: A_ = support_sizes[i - 1] A_ = S[s : s + size][start_token_masks[s : s + size]] A_ = S[s : s + size][end_token_masks[s : s + size]] A_ = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) A_ = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: A_ = torch.vstack((p_starts, p_start) ) A_ = torch.vstack((p_ends, p_end) ) else: A_ = p_start A_ = p_end return p_starts, p_ends
312
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __a :int = { 'configuration_mask2former': [ 'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Mask2FormerConfig', ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Union[str, Any] = ['Mask2FormerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Optional[Any] = [ 'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'Mask2FormerForUniversalSegmentation', 'Mask2FormerModel', 'Mask2FormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_maskaformer import MaskaFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskaformer import ( MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskaFormerForUniversalSegmentation, MaskaFormerModel, MaskaFormerPreTrainedModel, ) else: import sys __a :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
312
1
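# Illustrative aside: the FSNER forward pass above scores every query token
# against the support embeddings taken at [START]/[END] marker positions via
# summed dot products, then softmaxes over the query sequence. A toy-shaped
# sketch with random tensors (shapes and names are illustrative):
import torch

torch.manual_seed(0)
q = torch.randn(7, 16)        # one query: 7 tokens, hidden size 16
s_start = torch.randn(3, 16)  # support embeddings at 3 start-marker positions
p_start = torch.matmul(q, s_start.T).sum(1).softmax(0)  # shape: (7,)
assert abs(p_start.sum().item() - 1.0) < 1e-5  # a distribution over positions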
import numpy as np def __snake_case ( __UpperCamelCase : np.ndarray ): """simple docstring""" return 1 / (1 + np.exp(-vector )) def __snake_case ( __UpperCamelCase : np.ndarray ): """simple docstring""" return vector * sigmoid(__UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
312
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, ClassLabel, Features from .base import TaskTemplate @dataclass(frozen=snake_case_ ) class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} ) _lowerCamelCase : ClassVar[Features] = Features({'audio': Audio()} ) _lowerCamelCase : ClassVar[Features] = Features({'labels': ClassLabel} ) _lowerCamelCase : str = "audio" _lowerCamelCase : str = "labels" def __A ( self : str , UpperCAmelCase : List[Any] ): if self.label_column not in features: raise ValueError(f'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , UpperCAmelCase ): raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' ) A_ = copy.deepcopy(self ) A_ = self.label_schema.copy() A_ = features[self.label_column] A_ = label_schema return task_template @property def __A ( self : List[str] ): return { self.audio_column: "audio", self.label_column: "labels", }
312
1
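# Illustrative aside: a quick numeric check of the sigmoid and swish (SiLU)
# helpers defined above:
import numpy as np

def sigmoid(v: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-v))

def swish(v: np.ndarray) -> np.ndarray:
    return v * sigmoid(v)

x = np.array([-1.0, 0.0, 1.0])
print(sigmoid(x))  # approx [0.2689, 0.5, 0.7311]
print(swish(x))    # approx [-0.2689, 0.0, 0.7311]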
import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu __a :Dict = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json' with io.open(filename, 'r', encoding='utf-8') as f: __a :str = json.load(f) @require_torch class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : List[str] , UpperCAmelCase : Dict ): return FSMTTokenizer.from_pretrained(UpperCAmelCase ) def __A ( self : List[str] , UpperCAmelCase : Optional[int] ): A_ = FSMTForConditionalGeneration.from_pretrained(UpperCAmelCase ).to(UpperCAmelCase ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ["en-ru", 26.0], ["ru-en", 22.0], ["en-de", 22.0], ["de-en", 29.0], ] ) @slow def __A ( self : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] ): # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality A_ = f'''facebook/wmt19-{pair}''' A_ = self.get_tokenizer(UpperCAmelCase ) A_ = self.get_model(UpperCAmelCase ) A_ = bleu_data[pair]["src"] A_ = bleu_data[pair]["tgt"] A_ = tokenizer(UpperCAmelCase , return_tensors="pt" , truncation=UpperCAmelCase , padding="longest" ).to(UpperCAmelCase ) A_ = model.generate( input_ids=batch.input_ids , num_beams=8 , ) A_ = tokenizer.batch_decode( UpperCAmelCase , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase ) A_ = calculate_bleu(UpperCAmelCase , UpperCAmelCase ) print(UpperCAmelCase ) self.assertGreaterEqual(scores["bleu"] , UpperCAmelCase )
312
def __snake_case ( __UpperCamelCase : bytes ): """simple docstring""" return "".join([hex(__UpperCamelCase )[2:].zfill(2 ).upper() for byte in list(__UpperCamelCase )] ) def __snake_case ( __UpperCamelCase : str ): """simple docstring""" if (len(__UpperCamelCase ) % 2) != 0: raise ValueError( "Base16 encoded data is invalid:\nData does not have an even number of hex digits." ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(__UpperCamelCase ) <= set("0123456789ABCDEF" ): raise ValueError( "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] ,16 ) for i in range(0 ,len(__UpperCamelCase ) ,2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
312
1
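# Illustrative aside: the base16 helpers above are a hand-rolled RFC 3548
# encode/decode pair; they round-trip and agree with the stdlib:
import base64

data = b"Hello"
encoded = "".join(hex(b)[2:].zfill(2).upper() for b in data)
assert encoded == "48656C6C6F"
assert encoded == base64.b16encode(data).decode()
decoded = bytes(int(encoded[i : i + 2], 16) for i in range(0, len(encoded), 2))
assert decoded == data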
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class _a : """simple docstring""" @property def __A ( self : Union[str, Any] ): return self.get_dummy_input() @property def __A ( self : int ): if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' ) def __A ( self : Union[str, Any] , UpperCAmelCase : List[Any]=True , UpperCAmelCase : str=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[Any]=False , ): A_ = 4 A_ = 32 A_ = (32, 32) A_ = torch.manual_seed(0 ) A_ = torch.device(UpperCAmelCase ) A_ = (batch_size, num_channels) + sizes A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ) A_ = {"hidden_states": hidden_states} if include_temb: A_ = 128 A_ = randn_tensor((batch_size, temb_channels) , generator=UpperCAmelCase , device=UpperCAmelCase ) if include_res_hidden_states_tuple: A_ = torch.manual_seed(1 ) A_ = (randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ),) if include_encoder_hidden_states: A_ = floats_tensor((batch_size, 32, 32) ).to(UpperCAmelCase ) if include_skip_sample: A_ = randn_tensor(((batch_size, 3) + sizes) , generator=UpperCAmelCase , device=UpperCAmelCase ) return dummy_input def __A ( self : Optional[int] ): A_ = { "in_channels": 32, "out_channels": 32, "temb_channels": 128, } if self.block_type == "up": A_ = 32 if self.block_type == "mid": init_dict.pop("out_channels" ) A_ = self.dummy_input return init_dict, inputs_dict def __A ( self : List[str] , UpperCAmelCase : Optional[Any] ): A_ , A_ = self.prepare_init_args_and_inputs_for_common() A_ = self.block_class(**UpperCAmelCase ) unet_block.to(UpperCAmelCase ) unet_block.eval() with torch.no_grad(): A_ = unet_block(**UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = output[0] self.assertEqual(output.shape , self.output_shape ) A_ = output[0, -1, -3:, -3:] A_ = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase ) assert torch_all_close(output_slice.flatten() , UpperCAmelCase , atol=5E-3 ) @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" ) def __A ( self : Union[str, Any] ): A_ , A_ = self.prepare_init_args_and_inputs_for_common() A_ = self.block_class(**UpperCAmelCase ) model.to(UpperCAmelCase ) model.train() A_ = model(**UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = output[0] A_ = torch.device(UpperCAmelCase ) A_ = randn_tensor(output.shape , device=UpperCAmelCase ) A_ = torch.nn.functional.mse_loss(UpperCAmelCase , UpperCAmelCase ) loss.backward()
312
import cva import numpy as np class _a : """simple docstring""" def __init__( self : Any , UpperCAmelCase : float , UpperCAmelCase : int ): if k in (0.04, 0.06): A_ = k A_ = window_size else: raise ValueError("invalid k value" ) def __str__( self : Optional[Any] ): return str(self.k ) def __A ( self : int , UpperCAmelCase : str ): A_ = cva.imread(UpperCAmelCase , 0 ) A_ , A_ = img.shape A_ = [] A_ = img.copy() A_ = cva.cvtColor(UpperCAmelCase , cva.COLOR_GRAY2RGB ) A_ , A_ = np.gradient(UpperCAmelCase ) A_ = dx**2 A_ = dy**2 A_ = dx * dy A_ = 0.04 A_ = self.window_size // 2 for y in range(UpperCAmelCase , h - offset ): for x in range(UpperCAmelCase , w - offset ): A_ = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() A_ = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() A_ = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() A_ = (wxx * wyy) - (wxy**2) A_ = wxx + wyy A_ = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 255 ) return color_img, corner_list if __name__ == "__main__": __a :List[str] = HarrisCorner(0.04, 3) __a , __a :str = edge_detect.detect('path_to_image') cva.imwrite('detect.png', color_img)
312
1
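# Illustrative aside: the Harris detector above builds the structure tensor
# from image gradients and scores each window with det(M) - k * trace(M)^2.
# A compact vectorised sketch of the response map, assuming scipy is
# available (the explicit loops in the row compute the same window sums,
# up to boundary handling):
import numpy as np
from scipy.signal import convolve2d

def harris_response(img: np.ndarray, k: float = 0.04, window: int = 3) -> np.ndarray:
    dy, dx = np.gradient(img.astype(float))  # axis-0 (rows), axis-1 (cols)
    ixx, iyy, ixy = dx * dx, dy * dy, dx * dy
    ones = np.ones((window, window))
    wxx = convolve2d(ixx, ones, mode="same")
    wyy = convolve2d(iyy, ones, mode="same")
    wxy = convolve2d(ixy, ones, mode="same")
    det = wxx * wyy - wxy ** 2
    trace = wxx + wyy
    return det - k * trace ** 2  # large positive values indicate corners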
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class _a ( unittest.TestCase ): """simple docstring""" _lowerCamelCase : Optional[Any] = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def __A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] ): A_ = hf_hub_download( repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" ) A_ = VideoClassificationPipeline(model=UpperCAmelCase , image_processor=UpperCAmelCase , top_k=2 ) A_ = [ example_video_filepath, "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4", ] return video_classifier, examples def __A ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : Tuple ): for example in examples: A_ = video_classifier(UpperCAmelCase ) self.assertEqual( UpperCAmelCase , [ {"score": ANY(UpperCAmelCase ), "label": ANY(UpperCAmelCase )}, {"score": ANY(UpperCAmelCase ), "label": ANY(UpperCAmelCase )}, ] , ) @require_torch def __A ( self : Any ): A_ = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification" A_ = VideoMAEFeatureExtractor( size={"shortest_edge": 10} , crop_size={"height": 10, "width": 10} ) A_ = pipeline( "video-classification" , model=UpperCAmelCase , feature_extractor=UpperCAmelCase , frame_sampling_rate=4 ) A_ = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" ) A_ = video_classifier(UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}] , ) A_ = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [ [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], ] , ) @require_tf def __A ( self : Tuple ): pass
312
def __snake_case ( __UpperCamelCase : int = 1000 ): """simple docstring""" return sum(2 * a * ((a - 1) // 2) for a in range(3 ,n + 1 ) ) if __name__ == "__main__": print(solution())
312
1
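# Illustrative aside: the one-liner above evaluates sum(2*a*((a-1)//2)) for
# a in [3, n]; a spot check of the first few terms, taking the closed form
# at face value from the code:
def closed_form(n: int) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))

assert closed_form(3) == 6    # 2*3*1
assert closed_form(4) == 14   # 6 + 2*4*1
assert closed_form(5) == 34   # 14 + 2*5*2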
import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def __snake_case ( __UpperCamelCase : Any ): """simple docstring""" A_ = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(__UpperCamelCase ,__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Optional[Any] ): """simple docstring""" A_ , A_ = emb.weight.shape A_ = nn.Linear(__UpperCamelCase ,__UpperCamelCase ,bias=__UpperCamelCase ) A_ = emb.weight.data return lin_layer def __snake_case ( __UpperCamelCase : str ): """simple docstring""" A_ = torch.load(__UpperCamelCase ,map_location="cpu" ) A_ = mam_aaa["args"] or mam_aaa["cfg"]["model"] A_ = mam_aaa["model"] remove_ignore_keys_(__UpperCamelCase ) A_ = state_dict["encoder.embed_tokens.weight"].shape[0] A_ = MaMaaaConfig( vocab_size=__UpperCamelCase ,max_position_embeddings=1024 ,encoder_layers=args.encoder_layers ,decoder_layers=args.decoder_layers ,encoder_attention_heads=args.encoder_attention_heads ,decoder_attention_heads=args.decoder_attention_heads ,encoder_ffn_dim=args.encoder_ffn_embed_dim ,decoder_ffn_dim=args.decoder_ffn_embed_dim ,d_model=args.encoder_embed_dim ,encoder_layerdrop=args.encoder_layerdrop ,decoder_layerdrop=args.decoder_layerdrop ,dropout=args.dropout ,attention_dropout=args.attention_dropout ,activation_dropout=args.activation_dropout ,activation_function="relu" ,) A_ = state_dict["decoder.embed_tokens.weight"] A_ = MaMaaaForConditionalGeneration(__UpperCamelCase ) model.model.load_state_dict(__UpperCamelCase ,strict=__UpperCamelCase ) A_ = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": __a :List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.') parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') __a :Dict = parser.parse_args() __a :Optional[Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
312
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Union[str, Any] = ['image_processor', 'tokenizer'] _lowerCamelCase : Tuple = 'OwlViTImageProcessor' _lowerCamelCase : List[Any] = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self : Optional[Any] , UpperCAmelCase : int=None , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : Any ): A_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCAmelCase , ) A_ = kwargs.pop("feature_extractor" ) A_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(UpperCAmelCase , UpperCAmelCase ) def __call__( self : Optional[int] , UpperCAmelCase : List[str]=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Dict="max_length" , UpperCAmelCase : Optional[Any]="np" , **UpperCAmelCase : Optional[int] ): if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." ) if text is not None: if isinstance(UpperCAmelCase , UpperCAmelCase ) or (isinstance(UpperCAmelCase , UpperCAmelCase ) and not isinstance(text[0] , UpperCAmelCase )): A_ = [self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )] elif isinstance(UpperCAmelCase , UpperCAmelCase ) and isinstance(text[0] , UpperCAmelCase ): A_ = [] # Maximum number of queries across batch A_ = max([len(UpperCAmelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(UpperCAmelCase ) != max_num_queries: A_ = t + [" "] * (max_num_queries - len(UpperCAmelCase )) A_ = self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ) encodings.append(UpperCAmelCase ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": A_ = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) A_ = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp A_ = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) A_ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch A_ = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 ) A_ = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf A_ = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 ) A_ = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 ) else: raise ValueError("Target return tensor type could not be returned" ) A_ = BatchEncoding() A_ = input_ids A_ = attention_mask if query_images is not None: A_ = BatchEncoding() A_ = self.image_processor( 
UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ).pixel_values A_ = query_pixel_values if images is not None: A_ = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ) if text is not None and images is not None: A_ = image_features.pixel_values return encoding elif query_images is not None and images is not None: A_ = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase ) def __A ( self : Optional[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : List[Any] ): return self.image_processor.post_process(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : str , *UpperCAmelCase : str , **UpperCAmelCase : Union[str, Any] ): return self.image_processor.post_process_object_detection(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : List[Any] , *UpperCAmelCase : int , **UpperCAmelCase : int ): return self.image_processor.post_process_image_guided_detection(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : List[Any] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Any ): return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : Tuple , *UpperCAmelCase : Dict , **UpperCAmelCase : str ): return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase ) @property def __A ( self : Union[str, Any] ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase , ) return self.image_processor_class @property def __A ( self : Optional[Any] ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase , ) return self.image_processor
312
1
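# Illustrative aside: `make_linear_from_emb` in the row above turns the shared
# token embedding into a tied lm_head -- a bias-free Linear whose weight is
# the embedding table itself. A minimal demonstration:
import torch
from torch import nn

emb = nn.Embedding(10, 4)                       # toy vocab of 10, hidden 4
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(emb_size, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data           # weights are now shared/tied
logits = lm_head(torch.randn(1, 4))             # one score per vocab entry
assert logits.shape == (1, 10)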
import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() __a :int = logging.get_logger(__name__) def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" print("Loading config file..." ) def flatten_yaml_as_dict(__UpperCamelCase : Dict ,__UpperCamelCase : str="" ,__UpperCamelCase : int="." ): A_ = [] for k, v in d.items(): A_ = parent_key + sep + k if parent_key else k if isinstance(__UpperCamelCase ,collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(__UpperCamelCase ,__UpperCamelCase ,sep=__UpperCamelCase ).items() ) else: items.append((new_key, v) ) return dict(__UpperCamelCase ) A_ = argparse.Namespace() with open(__UpperCamelCase ,"r" ) as yaml_file: try: A_ = yaml.load(__UpperCamelCase ,Loader=yaml.FullLoader ) A_ = flatten_yaml_as_dict(__UpperCamelCase ) for k, v in flat_cfg.items(): setattr(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) except yaml.YAMLError as exc: logger.error("Error while loading config file: {}. Error message: {}".format(__UpperCamelCase ,str(__UpperCamelCase ) ) ) return config def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[int] ): """simple docstring""" A_ = MobileViTVaConfig() A_ = False # dataset if task_name.startswith("imagenet1k_" ): A_ = 1000 if int(task_name.strip().split("_" )[-1] ) == 384: A_ = 384 else: A_ = 256 A_ = "imagenet-1k-id2label.json" elif task_name.startswith("imagenet21k_to_1k_" ): A_ = 2_1000 if int(task_name.strip().split("_" )[-1] ) == 384: A_ = 384 else: A_ = 256 A_ = "imagenet-22k-id2label.json" elif task_name.startswith("ade20k_" ): A_ = 151 A_ = 512 A_ = "ade20k-id2label.json" A_ = True elif task_name.startswith("voc_" ): A_ = 21 A_ = 512 A_ = "pascal-voc-id2label.json" A_ = True # orig_config A_ = load_orig_config_file(__UpperCamelCase ) assert getattr(__UpperCamelCase ,"model.classification.name" ,-1 ) == "mobilevit_v2", "Invalid model" A_ = getattr(__UpperCamelCase ,"model.classification.mitv2.width_multiplier" ,1.0 ) assert ( getattr(__UpperCamelCase ,"model.classification.mitv2.attn_norm_layer" ,-1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" A_ = getattr(__UpperCamelCase ,"model.classification.activation.name" ,"swish" ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: A_ = getattr(__UpperCamelCase ,"model.segmentation.output_stride" ,16 ) if "_deeplabv3" in task_name: A_ = getattr(__UpperCamelCase ,"model.segmentation.deeplabv3.aspp_rates" ,[12, 24, 36] ) A_ = getattr(__UpperCamelCase ,"model.segmentation.deeplabv3.aspp_out_channels" ,512 ) A_ = getattr(__UpperCamelCase ,"model.segmentation.deeplabv3.aspp_dropout" ,0.1 ) # id2label A_ = "huggingface/label-files" A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) ) A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} A_ = idalabel A_ = {v: k for k, v in idalabel.items()} return config def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : List[str] ,__UpperCamelCase : Tuple ): """simple docstring""" A_ = dct.pop(__UpperCamelCase ) A_ = val def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : str=False 
): """simple docstring""" if base_model: A_ = "" else: A_ = "mobilevitv2." A_ = [] for k in state_dict.keys(): if k[:8] == "encoder.": A_ = k[8:] else: A_ = k if ".block." in k: A_ = k_new.replace(".block." ,"." ) if ".conv." in k: A_ = k_new.replace(".conv." ,".convolution." ) if ".norm." in k: A_ = k_new.replace(".norm." ,".normalization." ) if "conv_1." in k: A_ = k_new.replace("conv_1." ,f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: A_ = k_new.replace(f'''layer_{i}.''' ,f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: A_ = k_new.replace(".exp_1x1." ,".expand_1x1." ) if ".red_1x1." in k: A_ = k_new.replace(".red_1x1." ,".reduce_1x1." ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: A_ = k_new.replace(f'''layer_{i}.0.''' ,f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: A_ = k_new.replace(f'''layer_{i}.1.local_rep.0.''' ,f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: A_ = k_new.replace(f'''layer_{i}.1.local_rep.1.''' ,f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: A_ = [0, 1] elif i == 4: A_ = [0, 1, 2, 3] elif i == 5: A_ = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: A_ = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''' ,f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: A_ = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''' ,f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: A_ = k_new.replace(f'''layer_{i}.1.conv_proj.''' ,f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: A_ = k_new.replace("pre_norm_attn.0." ,"layernorm_before." ) if "pre_norm_attn.1." in k: A_ = k_new.replace("pre_norm_attn.1." ,"attention." ) if "pre_norm_ffn.0." in k: A_ = k_new.replace("pre_norm_ffn.0." ,"layernorm_after." ) if "pre_norm_ffn.1." in k: A_ = k_new.replace("pre_norm_ffn.1." ,"ffn.conv1." ) if "pre_norm_ffn.3." in k: A_ = k_new.replace("pre_norm_ffn.3." ,"ffn.conv2." ) if "classifier.1." in k: A_ = k_new.replace("classifier.1." ,"classifier." ) if "seg_head." in k: A_ = k_new.replace("seg_head." ,"segmentation_head." ) if ".aspp_layer." in k: A_ = k_new.replace(".aspp_layer." ,"." ) if ".aspp_pool." in k: A_ = k_new.replace(".aspp_pool." ,"." ) rename_keys.append((k, k_new) ) return rename_keys def __snake_case ( __UpperCamelCase : str ): """simple docstring""" A_ = [] for k in state_dict.keys(): if k.startswith("seg_head.aux_head." 
): keys_to_ignore.append(__UpperCamelCase ) for k in keys_to_ignore: state_dict.pop(__UpperCamelCase ,__UpperCamelCase ) def __snake_case ( ): """simple docstring""" A_ = "http://images.cocodataset.org/val2017/000000039769.jpg" # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Any ,__UpperCamelCase : List[Any] ): """simple docstring""" A_ = get_mobilevitva_config(__UpperCamelCase ,__UpperCamelCase ) # load original state_dict A_ = torch.load(__UpperCamelCase ,map_location="cpu" ) # load huggingface model if task_name.startswith("ade20k_" ) or task_name.startswith("voc_" ): A_ = MobileViTVaForSemanticSegmentation(__UpperCamelCase ).eval() A_ = False else: A_ = MobileViTVaForImageClassification(__UpperCamelCase ).eval() A_ = False # remove and rename some keys of load the original model A_ = checkpoint remove_unused_keys(__UpperCamelCase ) A_ = create_rename_keys(__UpperCamelCase ,base_model=__UpperCamelCase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # load modified state_dict model.load_state_dict(__UpperCamelCase ) # Check outputs on an image, prepared by MobileViTImageProcessor A_ = MobileViTImageProcessor(crop_size=config.image_size ,size=config.image_size + 32 ) A_ = image_processor(images=prepare_img() ,return_tensors="pt" ) A_ = model(**__UpperCamelCase ) # verify classification model if task_name.startswith("imagenet" ): A_ = outputs.logits A_ = logits.argmax(-1 ).item() print("Predicted class:" ,model.config.idalabel[predicted_class_idx] ) if task_name.startswith("imagenet1k_256" ) and config.width_multiplier == 1.0: # expected_logits for base variant A_ = torch.tensor([-1.6_3_3_6E0_0, -7.3_2_0_4E-0_2, -5.1_8_8_3E-0_1] ) assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1E-4 ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '--task', default='imagenet1k_256', type=str, help=( 'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . ' '\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n ' ), choices=[ 'imagenet1k_256', 'imagenet1k_384', 'imagenet21k_to_1k_256', 'imagenet21k_to_1k_384', 'ade20k_deeplabv3', 'voc_deeplabv3', ], ) parser.add_argument( '--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).' 
) parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.') parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) __a :Optional[Any] = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
312
from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class _a ( snake_case_ , snake_case_ ): """simple docstring""" @register_to_config def __init__( self : Dict , UpperCAmelCase : int = 768 , ): super().__init__() A_ = nn.Parameter(torch.zeros(1 , UpperCAmelCase ) ) A_ = nn.Parameter(torch.ones(1 , UpperCAmelCase ) ) def __A ( self : str , UpperCAmelCase : Optional[Union[str, torch.device]] = None , UpperCAmelCase : Optional[torch.dtype] = None , ): A_ = nn.Parameter(self.mean.to(UpperCAmelCase ).to(UpperCAmelCase ) ) A_ = nn.Parameter(self.std.to(UpperCAmelCase ).to(UpperCAmelCase ) ) return self def __A ( self : Dict , UpperCAmelCase : List[Any] ): A_ = (embeds - self.mean) * 1.0 / self.std return embeds def __A ( self : int , UpperCAmelCase : int ): A_ = (embeds * self.std) + self.mean return embeds
312
1
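The conversion script above leans on a rename-keys pass over the checkpoint's state dict: derive (old_key, new_key) pairs from string rules, then pop and reassign in place. A minimal self-contained sketch of that pattern, with hypothetical keys and rules for illustration only:

# Minimal sketch of the rename-keys pattern used by checkpoint converters.
# The keys and rules here are hypothetical, for illustration only.

def build_rename_pairs(state_dict):
    pairs = []
    for key in state_dict:
        new_key = key
        if new_key.startswith("encoder."):         # drop a framework prefix
            new_key = new_key[len("encoder."):]
        new_key = new_key.replace(".block.", ".")  # flatten a wrapper module
        if new_key != key:
            pairs.append((key, new_key))
    return pairs

def apply_rename(state_dict, pairs):
    for old_key, new_key in pairs:
        state_dict[new_key] = state_dict.pop(old_key)

sd = {"encoder.layer_1.block.conv": 1, "classifier.weight": 2}
apply_rename(sd, build_rename_pairs(sd))
print(sd)  # {'classifier.weight': 2, 'layer_1.conv': 1}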
from __future__ import annotations def __snake_case ( __UpperCamelCase : int ): """simple docstring""" A_ = str(__UpperCamelCase ) return len(__UpperCamelCase ) == 9 and set(__UpperCamelCase ) == set("123456789" ) def __snake_case ( ): """simple docstring""" for base_num in range(9999 ,4999 ,-1 ): A_ = 10_0002 * base_num if is_9_pandigital(__UpperCamelCase ): return candidate for base_num in range(333 ,99 ,-1 ): A_ = 100_2003 * base_num if is_9_pandigital(__UpperCamelCase ): return candidate return None if __name__ == "__main__": print(F"{solution() = }")
312
from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def __snake_case ( __UpperCamelCase : NDArray[floataa] ,__UpperCamelCase : NDArray[floataa] ,__UpperCamelCase : list[int] ,__UpperCamelCase : int ,): """simple docstring""" A_ , A_ = coefficient_matrix.shape A_ , A_ = constant_matrix.shape if rowsa != colsa: A_ = f'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}''' raise ValueError(__UpperCamelCase ) if colsa != 1: A_ = f'''Constant matrix must be nx1 but received {rowsa}x{colsa}''' raise ValueError(__UpperCamelCase ) if rowsa != rowsa: A_ = ( "Coefficient and constant matrices dimensions must be nxn and nx1 but " f'''received {rowsa}x{colsa} and {rowsa}x{colsa}''' ) raise ValueError(__UpperCamelCase ) if len(__UpperCamelCase ) != rowsa: A_ = ( "Number of initial values must be equal to number of rows in coefficient " f'''matrix but received {len(__UpperCamelCase )} and {rowsa}''' ) raise ValueError(__UpperCamelCase ) if iterations <= 0: raise ValueError("Iterations must be at least 1" ) A_ = np.concatenate( (coefficient_matrix, constant_matrix) ,axis=1 ) A_ , A_ = table.shape strictly_diagonally_dominant(__UpperCamelCase ) # Iterates the whole matrix for given number of times for _ in range(__UpperCamelCase ): A_ = [] for row in range(__UpperCamelCase ): A_ = 0 for col in range(__UpperCamelCase ): if col == row: A_ = table[row][col] elif col == cols - 1: A_ = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] A_ = (temp + val) / denom new_val.append(__UpperCamelCase ) A_ = new_val return [float(__UpperCamelCase ) for i in new_val] def __snake_case ( __UpperCamelCase : NDArray[floataa] ): """simple docstring""" A_ , A_ = table.shape A_ = True for i in range(0 ,__UpperCamelCase ): A_ = 0 for j in range(0 ,cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError("Coefficient matrix is not strictly diagonally dominant" ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
312
1
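The iterative solver above implements the Jacobi update x_i <- (b_i - sum_{j != i} a_ij * x_j) / a_ii, with every component refreshed from the previous iterate. A compact NumPy sketch of the same sweep; the 2x2 system is illustrative, and convergence assumes the strict diagonal dominance the snippet also checks for:

# Vectorized Jacobi sweep: all x_i are updated simultaneously from the old x.
import numpy as np

def jacobi(a, b, x0, iterations):
    x = np.asarray(x0, dtype=float)
    d = np.diag(a)
    for _ in range(iterations):
        x = (b - (a @ x - d * x)) / d  # keep only off-diagonal contributions in the sum
    return x

a = np.array([[4.0, 1.0], [2.0, 5.0]])
b = np.array([1.0, 2.0])
print(jacobi(a, b, [0.0, 0.0], 25))  # approaches the exact solution [1/6, 1/3]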
import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList __a :Optional[int] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif'] class _a ( snake_case_ ): """simple docstring""" def __init__( self : str , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple=None , UpperCAmelCase : Optional[Any]=1 ): A_ = tokenizer A_ = dataset A_ = len(UpperCAmelCase ) if n_tasks is None else n_tasks A_ = n_copies def __iter__( self : Optional[int] ): A_ = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() ) A_ = self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors="pt" ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class _a ( snake_case_ ): """simple docstring""" def __init__( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any ): A_ = start_length A_ = eof_strings A_ = tokenizer def __call__( self : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Any , **UpperCAmelCase : Tuple ): A_ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) A_ = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(UpperCAmelCase ) def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" A_ = re.split("(%s)" % "|".join(__UpperCamelCase ) ,__UpperCamelCase ) # last string should be "" return "".join(string_list[:-2] ) def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : int ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[Any]=20 ,**__UpperCamelCase : Union[str, Any] ): """simple docstring""" A_ = defaultdict(__UpperCamelCase ) # dict of list of generated tokens for step, batch in tqdm(enumerate(__UpperCamelCase ) ): with torch.no_grad(): A_ = batch["ids"].shape[-1] A_ = accelerator.unwrap_model(__UpperCamelCase ).generate( input_ids=batch["ids"][:, : batch["input_len"]] ,num_return_sequences=__UpperCamelCase ,**__UpperCamelCase ) # each task is generated batch_size times A_ = batch["task_id"].repeat(__UpperCamelCase ) A_ = accelerator.pad_across_processes( __UpperCamelCase ,dim=1 ,pad_index=tokenizer.pad_token_id ) A_ , A_ = accelerator.gather((generated_tokens, generated_tasks) ) A_ = generated_tokens.cpu().numpy() A_ = generated_tasks.cpu().numpy() for task, generated_tokens in zip(__UpperCamelCase ,__UpperCamelCase ): gen_token_dict[task].append(__UpperCamelCase ) A_ = [[] for _ in range(__UpperCamelCase )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: A_ = tokenizer.decode(__UpperCamelCase ,skip_special_tokens=__UpperCamelCase ,clean_up_tokenization_spaces=__UpperCamelCase ) code_gens[task].append(remove_last_block(__UpperCamelCase ) ) return code_gens def __snake_case ( ): """simple 
docstring""" A_ = HfArgumentParser(__UpperCamelCase ) A_ = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric A_ = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing A_ = "false" if args.num_workers is None: A_ = multiprocessing.cpu_count() # Use dataset load to feed to accelerate A_ = Accelerator() set_seed(args.seed ,device_specific=__UpperCamelCase ) # Load model and tokenizer A_ = AutoTokenizer.from_pretrained(args.model_ckpt ) A_ = tokenizer.eos_token A_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings A_ = { "do_sample": args.do_sample, "temperature": args.temperature, "max_new_tokens": args.max_new_tokens, "top_p": args.top_p, "top_k": args.top_k, "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 ,__UpperCamelCase ,__UpperCamelCase )] ), } # Load evaluation dataset and metric A_ = load_dataset("openai_humaneval" ) A_ = load_metric("code_eval" ) A_ = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] ) A_ = args.n_samples // args.batch_size A_ = TokenizedDataset(__UpperCamelCase ,human_eval["test"] ,n_copies=__UpperCamelCase ,n_tasks=__UpperCamelCase ) # do not confuse args.batch_size, which is actually the num_return_sequences A_ = DataLoader(__UpperCamelCase ,batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: A_ = code_eval_metric.compute(references=[""] ,predictions=[[""]] ) except ValueError as exception: print( "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`" " flag to enable code evaluation." ) raise exception A_ , A_ = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase ) A_ = complete_code( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,n_tasks=__UpperCamelCase ,batch_size=args.batch_size ,**__UpperCamelCase ,) if accelerator.is_main_process: A_ = [] for task in tqdm(range(__UpperCamelCase ) ): A_ = human_eval["test"][task]["test"] A_ = f'''check({human_eval["test"][task]["entry_point"]})''' references.append("\n" + test_func + "\n" + entry_point ) # Evaluate completions with "code_eval" metric A_ , A_ = code_eval_metric.compute( references=__UpperCamelCase ,predictions=__UpperCamelCase ,num_workers=args.num_workers ) print(f'''Results: {pass_at_k}''' ) # Save results to json file with open(args.output_file ,"w" ) as fp: json.dump(__UpperCamelCase ,__UpperCamelCase ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
312
from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def __snake_case ( ): """simple docstring""" A_ = { "repo_name": ["test_repo1", "test_repo2", "test_repo3"], "path": ["test_1.py", "test_2.py", "unit_test.py"], "content": ["a " * 20, "a " * 30, "b " * 7], } A_ = Dataset.from_dict(__UpperCamelCase ) return dataset class _a ( snake_case_ ): """simple docstring""" def __A ( self : Union[str, Any] ): A_ = get_dataset() A_ = make_duplicate_clusters(UpperCAmelCase , 0.85 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def __A ( self : List[Any] ): A_ = get_dataset() A_ , A_ = deduplicate_dataset(UpperCAmelCase ) self.assertEqual(len(UpperCAmelCase ) , 2 ) print(UpperCAmelCase ) self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 ) self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , UpperCAmelCase )
312
1
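The generation loop above trims each completion at Python top-level tokens via EndOfFunctionCriteria and remove_last_block. A simplified sketch of that trimming: split on any stop string and keep the prefix before the first match (the script itself instead drops the last block with string_list[:-2], since the prompt can also contain stop strings):

# Simplified stop-string trimming; EOF_STRINGS mirrors the list at the top of the script.
import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]

def truncate_at_first_stop(text):
    parts = re.split("(%s)" % "|".join(map(re.escape, EOF_STRINGS)), text)
    return parts[0]

print(repr(truncate_at_first_stop("    return a + b\nprint(add(1, 2))")))
# '    return a + b'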
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : List[str] ): A_ = tempfile.mkdtemp() A_ = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) A_ = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073], "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711], } A_ = os.path.join(self.tmpdirname , UpperCAmelCase ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(UpperCAmelCase , UpperCAmelCase ) def __A ( self : Dict , **UpperCAmelCase : Optional[Any] ): return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def __A ( self : Optional[Any] , **UpperCAmelCase : Optional[Any] ): return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def __A ( self : int , **UpperCAmelCase : Any ): return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def __A ( self : Union[str, Any] ): shutil.rmtree(self.tmpdirname ) def __A ( self : List[str] ): A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : List[str] ): A_ = self.get_tokenizer() A_ = self.get_rust_tokenizer() A_ = self.get_image_processor() A_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) A_ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase ) A_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) A_ = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase ) self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase ) def __A ( self : Any ): A_ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) A_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 ) 
A_ = AlignProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase ) def __A ( self : List[Any] ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = self.prepare_image_inputs() A_ = image_processor(UpperCAmelCase , return_tensors="np" ) A_ = processor(images=UpperCAmelCase , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __A ( self : Optional[Any] ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = processor(text=UpperCAmelCase ) A_ = tokenizer(UpperCAmelCase , padding="max_length" , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : Tuple ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = self.prepare_image_inputs() A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase ): processor() def __A ( self : List[Any] ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ = processor.batch_decode(UpperCAmelCase ) A_ = tokenizer.batch_decode(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : Union[str, Any] ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = self.prepare_image_inputs() A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
312
import os from typing import Dict, List, Tuple, TypeVar, Union __a :Any = TypeVar('T') __a :Union[str, Any] = Union[List[T], Tuple[T, ...]] __a :List[str] = Union[T, List[T], Dict[str, T]] __a :Any = Union[str, bytes, os.PathLike]
312
1
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mvp import MvpTokenizer __a :Optional[int] = logging.get_logger(__name__) __a :Optional[int] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} # See all MVP models at https://huggingface.co/models?filter=mvp __a :List[Any] = { 'vocab_file': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json', }, 'added_tokens.json': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json', }, 'merges_file': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt', }, 'tokenizer_file': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json', }, } __a :Optional[int] = { 'RUCAIBox/mvp': 1024, } class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES _lowerCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase : Tuple = ['input_ids', 'attention_mask'] _lowerCamelCase : str = MvpTokenizer def __init__( self : Tuple , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Any=None , UpperCAmelCase : Dict="replace" , UpperCAmelCase : Optional[int]="<s>" , UpperCAmelCase : str="</s>" , UpperCAmelCase : Tuple="</s>" , UpperCAmelCase : Dict="<s>" , UpperCAmelCase : List[str]="<unk>" , UpperCAmelCase : List[str]="<pad>" , UpperCAmelCase : Optional[int]="<mask>" , UpperCAmelCase : List[Any]=False , UpperCAmelCase : List[Any]=True , **UpperCAmelCase : Union[str, Any] , ): super().__init__( UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase , **UpperCAmelCase , ) A_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space: A_ = getattr(UpperCAmelCase , pre_tok_state.pop("type" ) ) A_ = add_prefix_space A_ = pre_tok_class(**UpperCAmelCase ) A_ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` A_ = "post_processor" A_ = getattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase ) if tokenizer_component_instance: A_ = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A_ = tuple(state["sep"] ) if "cls" in state: A_ = tuple(state["cls"] ) A_ = False if state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space: A_ = add_prefix_space A_ = True if state.get("trim_offsets" , UpperCAmelCase ) != trim_offsets: A_ = trim_offsets A_ = True if changes_to_apply: A_ = getattr(UpperCAmelCase , state.pop("type" ) ) A_ = component_class(**UpperCAmelCase ) setattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase ) @property def __A ( self : str ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." 
) return None return str(self._mask_token ) @mask_token.setter def __A ( self : Optional[Any] , UpperCAmelCase : str ): A_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else value A_ = value def __A ( self : Union[str, Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[int] ): A_ = kwargs.get("is_split_into_words" , UpperCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : List[Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ): A_ = kwargs.get("is_split_into_words" , UpperCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : int , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ): A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase ) return tuple(UpperCAmelCase ) def __A ( self : str , UpperCAmelCase : Any , UpperCAmelCase : Any=None ): A_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __A ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ): A_ = [self.sep_token_id] A_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
312
__a :Dict = '0.18.2' from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, 
LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, 
FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
312
1
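The diffusers __init__ above gates every import group behind soft-dependency probes. A stripped-down sketch of that guard pattern, assuming illustrative names; in the real package the placeholder classes are auto-generated dummy_*_objects modules rather than written inline:

# Sketch of the soft-dependency import guard: probe for the package once and,
# if it is missing, export a placeholder that fails only when instantiated.
import importlib.util

class OptionalDependencyNotAvailable(ImportError):
    pass

def is_scipy_available():
    return importlib.util.find_spec("scipy") is not None

try:
    if not is_scipy_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    class LMSDiscreteScheduler:  # placeholder standing in for the real class
        def __init__(self, *args, **kwargs):
            raise ImportError("LMSDiscreteScheduler requires `scipy`.")
else:
    import scipy  # noqa: F401  -- the real, scipy-backed class would be defined here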
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _a ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : List[str] = StableDiffusionInstructPixaPixPipeline _lowerCamelCase : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'} _lowerCamelCase : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS _lowerCamelCase : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS _lowerCamelCase : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS def __A ( self : List[str] ): torch.manual_seed(0 ) A_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) A_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase ) torch.manual_seed(0 ) A_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) A_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) A_ = CLIPTextModel(UpperCAmelCase ) A_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) A_ = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __A ( self : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any]=0 ): A_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase ) A_ = image.cpu().permute(0 , 2 , 3 , 1 )[0] A_ = Image.fromarray(np.uinta(UpperCAmelCase ) ).convert("RGB" ) if str(UpperCAmelCase ).startswith("mps" ): A_ = torch.manual_seed(UpperCAmelCase ) else: A_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase ) A_ = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "image_guidance_scale": 1, "output_type": "numpy", } return inputs def __A ( self : Tuple ): A_ = "cpu" # ensure determinism for the device-dependent torch.Generator A_ = self.get_dummy_components() A_ = StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase ) A_ = sd_pipe.to(UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase ) A_ = self.get_dummy_inputs(UpperCAmelCase ) A_ = sd_pipe(**UpperCAmelCase ).images A_ = image[0, -3:, -3:, -1] assert 
image.shape == (1, 32, 32, 3) A_ = np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __A ( self : str ): A_ = "cpu" # ensure determinism for the device-dependent torch.Generator A_ = self.get_dummy_components() A_ = StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase ) A_ = sd_pipe.to(UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase ) A_ = self.get_dummy_inputs(UpperCAmelCase ) A_ = "french fries" A_ = sd_pipe(**UpperCAmelCase , negative_prompt=UpperCAmelCase ) A_ = output.images A_ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) A_ = np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __A ( self : List[Any] ): A_ = "cpu" # ensure determinism for the device-dependent torch.Generator A_ = self.get_dummy_components() A_ = StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase ) A_ = sd_pipe.to(UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase ) A_ = self.get_dummy_inputs(UpperCAmelCase ) A_ = [inputs["prompt"]] * 2 A_ = np.array(inputs["image"] ).astype(np.floataa ) / 255.0 A_ = torch.from_numpy(UpperCAmelCase ).unsqueeze(0 ).to(UpperCAmelCase ) A_ = image / 2 + 0.5 A_ = image.permute(0 , 3 , 1 , 2 ) A_ = image.repeat(2 , 1 , 1 , 1 ) A_ = sd_pipe(**UpperCAmelCase ).images A_ = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) A_ = np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __A ( self : Any ): A_ = "cpu" # ensure determinism for the device-dependent torch.Generator A_ = self.get_dummy_components() A_ = EulerAncestralDiscreteScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" ) A_ = StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase ) A_ = sd_pipe.to(UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase ) A_ = self.get_dummy_inputs(UpperCAmelCase ) A_ = sd_pipe(**UpperCAmelCase ).images A_ = image[0, -3:, -3:, -1] A_ = [round(UpperCAmelCase , 4 ) for x in image_slice.flatten().tolist()] print(",".join([str(UpperCAmelCase ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) A_ = np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __A ( self : Dict ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def __A ( self : List[str] ): A_ = self.get_dummy_components() A_ = StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase ) A_ = VaeImageProcessor(do_resize=UpperCAmelCase , do_normalize=UpperCAmelCase ) A_ = pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) A_ = pipe(**self.get_dummy_inputs_by_type(UpperCAmelCase , input_image_type="pt" ) )[0] A_ = components["vae"] A_ = self.get_dummy_inputs_by_type(UpperCAmelCase , input_image_type="pt" ) for image_param in self.image_latents_params: if image_param in inputs.keys(): A_ = vae.encode(inputs[image_param] ).latent_dist.mode() A_ = pipe(**UpperCAmelCase )[0] A_ = np.abs(out - out_latents_inputs ).max() self.assertLess(UpperCAmelCase , 1E-4 , "passing latents as image input generate different result from passing image" ) @slow @require_torch_gpu class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : 
int ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : List[str] , UpperCAmelCase : List[Any]=0 ): A_ = torch.manual_seed(UpperCAmelCase ) A_ = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" ) A_ = { "prompt": "turn him into a cyborg", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "image_guidance_scale": 1.0, "output_type": "numpy", } return inputs def __A ( self : str ): A_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=UpperCAmelCase ) pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) pipe.enable_attention_slicing() A_ = self.get_inputs() A_ = pipe(**UpperCAmelCase ).images A_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) A_ = np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __A ( self : Tuple ): A_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=UpperCAmelCase ) A_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) pipe.enable_attention_slicing() A_ = self.get_inputs() A_ = pipe(**UpperCAmelCase ).images A_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) A_ = np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __A ( self : Any ): A_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=UpperCAmelCase ) A_ = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) pipe.enable_attention_slicing() A_ = self.get_inputs() A_ = pipe(**UpperCAmelCase ).images A_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) A_ = np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __A ( self : Any ): A_ = 0 def callback_fn(UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : torch.FloatTensor ) -> None: A_ = True nonlocal number_of_steps number_of_steps += 1 if step == 1: A_ = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) A_ = latents[0, -3:, -3:, -1] A_ = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: A_ = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) A_ = latents[0, -3:, -3:, -1] A_ = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 A_ = False A_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=UpperCAmelCase , torch_dtype=torch.floataa ) A_ = pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) pipe.enable_attention_slicing() A_ = self.get_inputs() pipe(**UpperCAmelCase , callback=UpperCAmelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def __A ( self : List[str] ): torch.cuda.empty_cache() 
torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() A_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=UpperCAmelCase , torch_dtype=torch.floataa ) A_ = pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() A_ = self.get_inputs() A_ = pipe(**UpperCAmelCase ) A_ = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def __A ( self : Tuple ): A_ = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 A_ = inputs["image"].resize((504, 504) ) A_ = "timbrooks/instruct-pix2pix" A_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( UpperCAmelCase , safety_checker=UpperCAmelCase , ) pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) pipe.enable_attention_slicing() A_ = pipe(**UpperCAmelCase ) A_ = output.images[0] A_ = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) A_ = np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
312
def __snake_case ( __UpperCamelCase : int = 1000 ): """simple docstring""" return sum(e for e in range(3 ,__UpperCamelCase ) if e % 3 == 0 or e % 5 == 0 ) if __name__ == "__main__": print(F"{solution() = }")
312
1
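The brute-force sum in the Project Euler snippet above also has an O(1) closed form: by inclusion-exclusion, the multiples of 3 or 5 below n sum to S(3) + S(5) - S(15), where S(k) = k * m * (m + 1) / 2 with m = (n - 1) // k. A quick cross-check:

# Closed-form cross-check for the loop-based solution() above, via
# inclusion-exclusion over multiples of 3, 5, and their lcm 15.
def solution_closed_form(limit=1000):
    def s(k):  # sum of multiples of k strictly below `limit`
        m = (limit - 1) // k
        return k * m * (m + 1) // 2
    return s(3) + s(5) - s(15)

print(solution_closed_form())  # 233168, matching sum(e for e in range(3, 1000) ...)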
import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Union[str, Any]=1 ): """simple docstring""" if n_shave_prefix_segments >= 0: return ".".join(path.split("." )[n_shave_prefix_segments:] ) else: return ".".join(path.split("." )[:n_shave_prefix_segments] ) def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : Tuple=0 ): """simple docstring""" A_ = [] for old_item in old_list: A_ = old_item.replace("in_layers.0" ,"norm1" ) A_ = new_item.replace("in_layers.2" ,"conv1" ) A_ = new_item.replace("out_layers.0" ,"norm2" ) A_ = new_item.replace("out_layers.3" ,"conv2" ) A_ = new_item.replace("emb_layers.1" ,"time_emb_proj" ) A_ = new_item.replace("skip_connection" ,"conv_shortcut" ) A_ = shave_segments(__UpperCamelCase ,n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"old": old_item, "new": new_item} ) return mapping def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Dict=0 ): """simple docstring""" A_ = [] for old_item in old_list: A_ = old_item A_ = new_item.replace("norm.weight" ,"group_norm.weight" ) A_ = new_item.replace("norm.bias" ,"group_norm.bias" ) A_ = new_item.replace("proj_out.weight" ,"proj_attn.weight" ) A_ = new_item.replace("proj_out.bias" ,"proj_attn.bias" ) A_ = shave_segments(__UpperCamelCase ,n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"old": old_item, "new": new_item} ) return mapping def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Any=None ,__UpperCamelCase : int=None ,__UpperCamelCase : Dict=None ): """simple docstring""" assert isinstance(__UpperCamelCase ,__UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): A_ = old_checkpoint[path] A_ = old_tensor.shape[0] // 3 A_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) A_ = old_tensor.shape[0] // config["num_head_channels"] // 3 A_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) A_ , A_ , A_ = old_tensor.split(channels // num_heads ,dim=1 ) A_ = query.reshape(__UpperCamelCase ) A_ = key.reshape(__UpperCamelCase ) A_ = value.reshape(__UpperCamelCase ) for path in paths: A_ = path["new"] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here A_ = new_path.replace("middle_block.0" ,"mid_block.resnets.0" ) A_ = new_path.replace("middle_block.1" ,"mid_block.attentions.0" ) A_ = new_path.replace("middle_block.2" ,"mid_block.resnets.1" ) if additional_replacements is not None: for replacement in additional_replacements: A_ = new_path.replace(replacement["old"] ,replacement["new"] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: A_ = old_checkpoint[path["old"]][:, :, 0] else: A_ = old_checkpoint[path["old"]] def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Optional[Any] ): """simple docstring""" A_ = {} A_ = checkpoint["time_embed.0.weight"] A_ = checkpoint["time_embed.0.bias"] A_ = checkpoint["time_embed.2.weight"] A_ = checkpoint["time_embed.2.bias"] A_ = checkpoint["input_blocks.0.0.weight"] A_ = checkpoint["input_blocks.0.0.bias"] A_ = checkpoint["out.0.weight"] A_ = checkpoint["out.0.bias"] A_ = checkpoint["out.2.weight"] A_ = checkpoint["out.2.bias"] # Retrieves the keys for the input blocks only A_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} ) A_ = { layer_id: [key for key in checkpoint if f'''input_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the middle blocks only A_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} ) A_ = { layer_id: [key for key in checkpoint if f'''middle_block.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the output blocks only A_ = len({".".join(layer.split("." 
)[:2] ) for layer in checkpoint if "output_blocks" in layer} ) A_ = { layer_id: [key for key in checkpoint if f'''output_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } for i in range(1 ,__UpperCamelCase ): A_ = (i - 1) // (config["num_res_blocks"] + 1) A_ = (i - 1) % (config["num_res_blocks"] + 1) A_ = [key for key in input_blocks[i] if f'''input_blocks.{i}.0''' in key] A_ = [key for key in input_blocks[i] if f'''input_blocks.{i}.1''' in key] if f'''input_blocks.{i}.0.op.weight''' in checkpoint: A_ = checkpoint[ f'''input_blocks.{i}.0.op.weight''' ] A_ = checkpoint[ f'''input_blocks.{i}.0.op.bias''' ] continue A_ = renew_resnet_paths(__UpperCamelCase ) A_ = {"old": f'''input_blocks.{i}.0''', "new": f'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''} A_ = {"old": "resnets.2.op", "new": "downsamplers.0.op"} assign_to_checkpoint( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,additional_replacements=[meta_path, resnet_op] ,config=__UpperCamelCase ) if len(__UpperCamelCase ): A_ = renew_attention_paths(__UpperCamelCase ) A_ = { "old": f'''input_blocks.{i}.1''', "new": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}''', } A_ = { f'''input_blocks.{i}.1.qkv.bias''': { "key": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', "query": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', "value": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, f'''input_blocks.{i}.1.qkv.weight''': { "key": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', "query": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', "value": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,additional_replacements=[meta_path] ,attention_paths_to_split=__UpperCamelCase ,config=__UpperCamelCase ,) A_ = middle_blocks[0] A_ = middle_blocks[1] A_ = middle_blocks[2] A_ = renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,config=__UpperCamelCase ) A_ = renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,config=__UpperCamelCase ) A_ = renew_attention_paths(__UpperCamelCase ) A_ = { "middle_block.1.qkv.bias": { "key": "mid_block.attentions.0.key.bias", "query": "mid_block.attentions.0.query.bias", "value": "mid_block.attentions.0.value.bias", }, "middle_block.1.qkv.weight": { "key": "mid_block.attentions.0.key.weight", "query": "mid_block.attentions.0.query.weight", "value": "mid_block.attentions.0.value.weight", }, } assign_to_checkpoint( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,attention_paths_to_split=__UpperCamelCase ,config=__UpperCamelCase ) for i in range(__UpperCamelCase ): A_ = i // (config["num_res_blocks"] + 1) A_ = i % (config["num_res_blocks"] + 1) A_ = [shave_segments(__UpperCamelCase ,2 ) for name in output_blocks[i]] A_ = {} for layer in output_block_layers: A_ , A_ = layer.split("." 
)[0], shave_segments(__UpperCamelCase ,1 ) if layer_id in output_block_list: output_block_list[layer_id].append(__UpperCamelCase ) else: A_ = [layer_name] if len(__UpperCamelCase ) > 1: A_ = [key for key in output_blocks[i] if f'''output_blocks.{i}.0''' in key] A_ = [key for key in output_blocks[i] if f'''output_blocks.{i}.1''' in key] A_ = renew_resnet_paths(__UpperCamelCase ) A_ = renew_resnet_paths(__UpperCamelCase ) A_ = {"old": f'''output_blocks.{i}.0''', "new": f'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''} assign_to_checkpoint(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,additional_replacements=[meta_path] ,config=__UpperCamelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): A_ = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] ) A_ = checkpoint[ f'''output_blocks.{i}.{index}.conv.weight''' ] A_ = checkpoint[ f'''output_blocks.{i}.{index}.conv.bias''' ] # Clear attentions as they have been attributed above. if len(__UpperCamelCase ) == 2: A_ = [] if len(__UpperCamelCase ): A_ = renew_attention_paths(__UpperCamelCase ) A_ = { "old": f'''output_blocks.{i}.1''', "new": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}''', } A_ = { f'''output_blocks.{i}.1.qkv.bias''': { "key": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', "query": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', "value": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, f'''output_blocks.{i}.1.qkv.weight''': { "key": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', "query": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', "value": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,additional_replacements=[meta_path] ,attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None ,config=__UpperCamelCase ,) else: A_ = renew_resnet_paths(__UpperCamelCase ,n_shave_prefix_segments=1 ) for path in resnet_0_paths: A_ = ".".join(["output_blocks", str(__UpperCamelCase ), path["old"]] ) A_ = ".".join(["up_blocks", str(__UpperCamelCase ), "resnets", str(__UpperCamelCase ), path["new"]] ) A_ = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __a :Dict = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') __a :str = parser.parse_args() __a :Optional[Any] = torch.load(args.checkpoint_path) with open(args.config_file) as f: __a :int = json.loads(f.read()) __a :Any = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __a :Dict = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __a :List[str] = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1])) __a :Optional[Any] = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1])) __a :str = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
312
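# A minimal sketch of the index arithmetic driving the conversion above: flat
# `input_blocks.{i}` indices are regrouped into (block_id, layer_in_block_id)
# pairs. `num_res_blocks` is an assumed example value, not read from a real config.
num_res_blocks = 2
layers_per_block = num_res_blocks + 1  # each group also reserves a down/upsampler slot
for i in range(1, 7):
    block_id = (i - 1) // layers_per_block
    layer_in_block_id = (i - 1) % layers_per_block
    print(f"input_blocks.{i} -> down_blocks.{block_id}.resnets.{layer_in_block_id}")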
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class _a : """simple docstring""" @property def __A ( self : Union[str, Any] ): return self.get_dummy_input() @property def __A ( self : int ): if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' ) def __A ( self : Union[str, Any] , UpperCAmelCase : List[Any]=True , UpperCAmelCase : str=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[Any]=False , ): A_ = 4 A_ = 32 A_ = (32, 32) A_ = torch.manual_seed(0 ) A_ = torch.device(UpperCAmelCase ) A_ = (batch_size, num_channels) + sizes A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ) A_ = {"hidden_states": hidden_states} if include_temb: A_ = 128 A_ = randn_tensor((batch_size, temb_channels) , generator=UpperCAmelCase , device=UpperCAmelCase ) if include_res_hidden_states_tuple: A_ = torch.manual_seed(1 ) A_ = (randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ),) if include_encoder_hidden_states: A_ = floats_tensor((batch_size, 32, 32) ).to(UpperCAmelCase ) if include_skip_sample: A_ = randn_tensor(((batch_size, 3) + sizes) , generator=UpperCAmelCase , device=UpperCAmelCase ) return dummy_input def __A ( self : Optional[int] ): A_ = { "in_channels": 32, "out_channels": 32, "temb_channels": 128, } if self.block_type == "up": A_ = 32 if self.block_type == "mid": init_dict.pop("out_channels" ) A_ = self.dummy_input return init_dict, inputs_dict def __A ( self : List[str] , UpperCAmelCase : Optional[Any] ): A_ , A_ = self.prepare_init_args_and_inputs_for_common() A_ = self.block_class(**UpperCAmelCase ) unet_block.to(UpperCAmelCase ) unet_block.eval() with torch.no_grad(): A_ = unet_block(**UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = output[0] self.assertEqual(output.shape , self.output_shape ) A_ = output[0, -1, -3:, -3:] A_ = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase ) assert torch_all_close(output_slice.flatten() , UpperCAmelCase , atol=5E-3 ) @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" ) def __A ( self : Union[str, Any] ): A_ , A_ = self.prepare_init_args_and_inputs_for_common() A_ = self.block_class(**UpperCAmelCase ) model.to(UpperCAmelCase ) model.train() A_ = model(**UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = output[0] A_ = torch.device(UpperCAmelCase ) A_ = randn_tensor(output.shape , device=UpperCAmelCase ) A_ = torch.nn.functional.mse_loss(UpperCAmelCase , UpperCAmelCase ) loss.backward()
312
1
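# The dummy-input helper above relies on seeded generators so that every run
# sees identical tensors; a minimal sketch of that pattern, assuming only `torch`.
import torch

generator = torch.manual_seed(0)
first = torch.randn((4, 32, 32, 32), generator=generator)
generator = torch.manual_seed(0)
second = torch.randn((4, 32, 32, 32), generator=generator)
assert torch.equal(first, second)  # deterministic across runs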
import re def __snake_case ( __UpperCamelCase : str ): """simple docstring""" A_ = re.compile( R"^(?:0|94|\+94|0{2}94)" R"7(0|1|2|4|5|6|7|8)" R"(-| |)" R"\d{7}$" ) return bool(re.search(__UpperCamelCase ,__UpperCamelCase ) ) if __name__ == "__main__": __a :Optional[int] = '0094702343221' print(is_sri_lankan_phone_number(phone))
312
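# A quick sanity check of the regex above; the sample numbers are made up
# purely for illustration.
import re

pattern = re.compile(r"^(?:0|94|\+94|0{2}94)7(0|1|2|4|5|6|7|8)(-| |)\d{7}$")
for number in ("0094702343221", "+94771234567", "0712345678", "0731234567"):
    # the last prints False: '3' is not an accepted operator digit
    print(number, bool(pattern.search(number)))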
import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch __a :int = True except ImportError: __a :Optional[Any] = False try: from torch.hub import _get_torch_home __a :Optional[Any] = _get_torch_home() except ImportError: __a :Tuple = os.path.expanduser( os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')) ) __a :Optional[Any] = os.path.join(torch_cache_home, 'transformers') __a :int = 'https://cdn.huggingface.co' __a :Any = 'https://s3.amazonaws.com/models.huggingface.co/bert' __a :Optional[Any] = '/'.join(str(Path(__file__).resolve()).split('/')[:-1]) __a :str = os.path.join(PATH, 'config.yaml') __a :str = os.path.join(PATH, 'attributes.txt') __a :Optional[Any] = os.path.join(PATH, 'objects.txt') __a :Optional[int] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path) __a :Dict = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE) __a :List[Any] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE) __a :List[str] = 'pytorch_model.bin' __a :Tuple = 'config.yaml' def __snake_case ( __UpperCamelCase : Optional[Any]=OBJECTS ,__UpperCamelCase : List[str]=ATTRIBUTES ): """simple docstring""" A_ = [] with open(__UpperCamelCase ) as f: for object in f.readlines(): vg_classes.append(object.split("," )[0].lower().strip() ) A_ = [] with open(__UpperCamelCase ) as f: for object in f.readlines(): vg_attrs.append(object.split("," )[0].lower().strip() ) return vg_classes, vg_attrs def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" A_ = OrderedDict() with open(__UpperCamelCase ,"rb" ) as f: A_ = pkl.load(__UpperCamelCase )["model"] for k in copy.deepcopy(list(ckp.keys() ) ): A_ = ckp.pop(__UpperCamelCase ) if isinstance(__UpperCamelCase ,np.ndarray ): A_ = torch.tensor(__UpperCamelCase ) else: assert isinstance(__UpperCamelCase ,torch.tensor ), type(__UpperCamelCase ) A_ = v return r class _a : """simple docstring""" _lowerCamelCase : Union[str, Any] = {} def __init__( self : str , UpperCAmelCase : dict , UpperCAmelCase : str = "root" , UpperCAmelCase : List[str]=0 ): A_ = name A_ = level A_ = {} for k, v in dictionary.items(): if v is None: raise ValueError() A_ = copy.deepcopy(UpperCAmelCase ) A_ = copy.deepcopy(UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = Config(UpperCAmelCase , name=UpperCAmelCase , level=level + 1 ) A_ = v setattr(self , UpperCAmelCase , UpperCAmelCase ) A_ = d def __repr__( self : Optional[Any] ): return str(list((self._pointer.keys()) ) ) def __setattr__( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Any ): A_ = val A_ = val A_ = key.split("." 
) A_ = len(UpperCAmelCase ) - 1 A_ = self._pointer if len(UpperCAmelCase ) > 1: for i, l in enumerate(UpperCAmelCase ): if hasattr(self , UpperCAmelCase ) and isinstance(getattr(self , UpperCAmelCase ) , UpperCAmelCase ): setattr(getattr(self , UpperCAmelCase ) , ".".join(levels[i:] ) , UpperCAmelCase ) if l == last_level: A_ = val else: A_ = pointer[l] def __A ( self : List[str] ): return self._pointer def __A ( self : int , UpperCAmelCase : Tuple , UpperCAmelCase : int ): with open(f'''{file_name}''' , "w" ) as stream: dump(UpperCAmelCase , UpperCAmelCase ) def __A ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Tuple ): with open(f'''{file_name}''' , "w" ) as stream: json.dump(UpperCAmelCase , UpperCAmelCase ) @staticmethod def __A ( UpperCAmelCase : Optional[int] ): with open(UpperCAmelCase ) as stream: A_ = load(UpperCAmelCase , Loader=UpperCAmelCase ) return data def __str__( self : str ): A_ = " " if self._name != "root": A_ = f'''{t * (self._level-1)}{self._name}:\n''' else: A_ = "" A_ = self._level for i, (k, v) in enumerate(self._pointer.items() ): if isinstance(UpperCAmelCase , UpperCAmelCase ): r += f'''{t * (self._level)}{v}\n''' self._level += 1 else: r += f'''{t * (self._level)}{k}: {v} ({type(UpperCAmelCase ).__name__})\n''' A_ = level return r[:-1] @classmethod def __A ( cls : Optional[Any] , UpperCAmelCase : str , **UpperCAmelCase : str ): A_ , A_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase ) return cls(UpperCAmelCase ) @classmethod def __A ( cls : int , UpperCAmelCase : str , **UpperCAmelCase : int ): A_ = kwargs.pop("cache_dir" , UpperCAmelCase ) A_ = kwargs.pop("force_download" , UpperCAmelCase ) A_ = kwargs.pop("resume_download" , UpperCAmelCase ) A_ = kwargs.pop("proxies" , UpperCAmelCase ) A_ = kwargs.pop("local_files_only" , UpperCAmelCase ) if os.path.isdir(UpperCAmelCase ): A_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) elif os.path.isfile(UpperCAmelCase ) or is_remote_url(UpperCAmelCase ): A_ = pretrained_model_name_or_path else: A_ = hf_bucket_url(UpperCAmelCase , filename=UpperCAmelCase , use_cdn=UpperCAmelCase ) try: # Load from URL or cache if already cached A_ = cached_path( UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , proxies=UpperCAmelCase , resume_download=UpperCAmelCase , local_files_only=UpperCAmelCase , ) # Load config dict if resolved_config_file is None: raise EnvironmentError A_ = Config.load_yaml(UpperCAmelCase ) except EnvironmentError: A_ = "Can't load config for" raise EnvironmentError(UpperCAmelCase ) if resolved_config_file == config_file: print("loading configuration file from path" ) else: print("loading configuration file cache" ) return Config.load_yaml(UpperCAmelCase ), kwargs def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" A_ = torch.load("dump.pt" ,map_location=in_tensor.device ) A_ = in_tensor.numpy() A_ = out_tensor.numpy()[0] print(na.shape ,na[0, 0, :5] ) print(na.shape ,na[0, 0, :5] ) assert np.allclose(__UpperCamelCase ,__UpperCamelCase ,rtol=0.01 ,atol=0.1 ), ( f'''{sum([1 for x in np.isclose(__UpperCamelCase ,__UpperCamelCase ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %''' " element-wise mismatch" ) raise Exception("tensors are all good" ) # Hugging face functions below def __snake_case ( __UpperCamelCase : Optional[int] ): """simple docstring""" A_ = urlparse(__UpperCamelCase ) return parsed.scheme in ("http", "https") def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase 
: str=True ): """simple docstring""" A_ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX A_ = "/" not in model_id if legacy_format: return f'''{endpoint}/{model_id}-{filename}''' else: return f'''{endpoint}/{model_id}/{filename}''' def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str]=None ,__UpperCamelCase : int=0 ,__UpperCamelCase : int=None ,): """simple docstring""" A_ = "python/{}".format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(__UpperCamelCase ,__UpperCamelCase ): ua += "; " + "; ".join("{}/{}".format(__UpperCamelCase ,__UpperCamelCase ) for k, v in user_agent.items() ) elif isinstance(__UpperCamelCase ,__UpperCamelCase ): ua += "; " + user_agent A_ = {"user-agent": ua} if resume_size > 0: A_ = "bytes=%d-" % (resume_size,) A_ = requests.get(__UpperCamelCase ,stream=__UpperCamelCase ,proxies=__UpperCamelCase ,headers=__UpperCamelCase ) if response.status_code == 416: # Range not satisfiable return A_ = response.headers.get("Content-Length" ) A_ = resume_size + int(__UpperCamelCase ) if content_length is not None else None A_ = tqdm( unit="B" ,unit_scale=__UpperCamelCase ,total=__UpperCamelCase ,initial=__UpperCamelCase ,desc="Downloading" ,) for chunk in response.iter_content(chunk_size=1024 ): if chunk: # filter out keep-alive new chunks progress.update(len(__UpperCamelCase ) ) temp_file.write(__UpperCamelCase ) progress.close() def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Any=None ,__UpperCamelCase : Dict=False ,__UpperCamelCase : Union[str, Any]=None ,__UpperCamelCase : Any=10 ,__UpperCamelCase : int=False ,__UpperCamelCase : Optional[Any]=None ,__UpperCamelCase : str=False ,): """simple docstring""" if cache_dir is None: A_ = TRANSFORMERS_CACHE if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = str(__UpperCamelCase ) os.makedirs(__UpperCamelCase ,exist_ok=__UpperCamelCase ) A_ = None if not local_files_only: try: A_ = requests.head(__UpperCamelCase ,allow_redirects=__UpperCamelCase ,proxies=__UpperCamelCase ,timeout=__UpperCamelCase ) if response.status_code == 200: A_ = response.headers.get("ETag" ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass A_ = url_to_filename(__UpperCamelCase ,__UpperCamelCase ) # get cache path to put the file A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(__UpperCamelCase ): return cache_path else: A_ = [ file for file in fnmatch.filter(os.listdir(__UpperCamelCase ) ,filename + ".*" ) if not file.endswith(".json" ) and not file.endswith(".lock" ) ] if len(__UpperCamelCase ) > 0: return os.path.join(__UpperCamelCase ,matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( "Cannot find the requested files in the cached path and outgoing traffic has been" " disabled. To enable model look-ups and downloads online, set 'local_files_only'" " to False." ) return None # From now on, etag is not None. if os.path.exists(__UpperCamelCase ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. 
A_ = cache_path + ".lock" with FileLock(__UpperCamelCase ): # If the download just completed while the lock was activated. if os.path.exists(__UpperCamelCase ) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: A_ = cache_path + ".incomplete" @contextmanager def _resumable_file_manager(): with open(__UpperCamelCase ,"a+b" ) as f: yield f A_ = _resumable_file_manager if os.path.exists(__UpperCamelCase ): A_ = os.stat(__UpperCamelCase ).st_size else: A_ = 0 else: A_ = partial(tempfile.NamedTemporaryFile ,dir=__UpperCamelCase ,delete=__UpperCamelCase ) A_ = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: print( "%s not found in cache or force_download set to True, downloading to %s" ,__UpperCamelCase ,temp_file.name ,) http_get( __UpperCamelCase ,__UpperCamelCase ,proxies=__UpperCamelCase ,resume_size=__UpperCamelCase ,user_agent=__UpperCamelCase ,) os.replace(temp_file.name ,__UpperCamelCase ) A_ = {"url": url, "etag": etag} A_ = cache_path + ".json" with open(__UpperCamelCase ,"w" ) as meta_file: json.dump(__UpperCamelCase ,__UpperCamelCase ) return cache_path def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : str=None ): """simple docstring""" A_ = url.encode("utf-8" ) A_ = shaaaa(__UpperCamelCase ) A_ = url_hash.hexdigest() if etag: A_ = etag.encode("utf-8" ) A_ = shaaaa(__UpperCamelCase ) filename += "." + etag_hash.hexdigest() if url.endswith(".h5" ): filename += ".h5" return filename def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Union[str, Any]=None ,__UpperCamelCase : List[Any]=False ,__UpperCamelCase : List[str]=None ,__UpperCamelCase : Any=False ,__UpperCamelCase : Optional[int]=None ,__UpperCamelCase : Optional[Any]=False ,__UpperCamelCase : Dict=False ,__UpperCamelCase : Optional[Any]=False ,): """simple docstring""" if cache_dir is None: A_ = TRANSFORMERS_CACHE if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = str(__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = str(__UpperCamelCase ) if is_remote_url(__UpperCamelCase ): # URL, so get it from the cache (downloading if necessary) A_ = get_from_cache( __UpperCamelCase ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,user_agent=__UpperCamelCase ,local_files_only=__UpperCamelCase ,) elif os.path.exists(__UpperCamelCase ): # File, and it exists. A_ = url_or_filename elif urlparse(__UpperCamelCase ).scheme == "": # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(__UpperCamelCase ) ) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(__UpperCamelCase ) ) if extract_compressed_file: if not is_zipfile(__UpperCamelCase ) and not tarfile.is_tarfile(__UpperCamelCase ): return output_path # Path where we extract compressed archives # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" A_ , A_ = os.path.split(__UpperCamelCase ) A_ = output_file.replace("." 
,"-" ) + "-extracted" A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ) if os.path.isdir(__UpperCamelCase ) and os.listdir(__UpperCamelCase ) and not force_extract: return output_path_extracted # Prevent parallel extractions A_ = output_path + ".lock" with FileLock(__UpperCamelCase ): shutil.rmtree(__UpperCamelCase ,ignore_errors=__UpperCamelCase ) os.makedirs(__UpperCamelCase ) if is_zipfile(__UpperCamelCase ): with ZipFile(__UpperCamelCase ,"r" ) as zip_file: zip_file.extractall(__UpperCamelCase ) zip_file.close() elif tarfile.is_tarfile(__UpperCamelCase ): A_ = tarfile.open(__UpperCamelCase ) tar_file.extractall(__UpperCamelCase ) tar_file.close() else: raise EnvironmentError("Archive format of {} could not be identified".format(__UpperCamelCase ) ) return output_path_extracted return output_path def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Any="," ): """simple docstring""" assert isinstance(__UpperCamelCase ,__UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): with open(__UpperCamelCase ) as f: A_ = eval(f.read() ) else: A_ = requests.get(__UpperCamelCase ) try: A_ = requests.json() except Exception: A_ = req.content.decode() assert data is not None, "could not connect" try: A_ = eval(__UpperCamelCase ) except Exception: A_ = data.split("\n" ) req.close() return data def __snake_case ( __UpperCamelCase : int ): """simple docstring""" A_ = requests.get(__UpperCamelCase ) A_ = np.array(Image.open(BytesIO(response.content ) ) ) return img def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" A_ = url.split("/" )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(__UpperCamelCase ) with open(__UpperCamelCase ,"rb" ) as stream: A_ = pkl.load(__UpperCamelCase ) A_ = weights.pop("model" ) A_ = {} for k, v in model.items(): A_ = torch.from_numpy(__UpperCamelCase ) if "running_var" in k: A_ = torch.tensor([0] ) A_ = k.replace("running_var" ,"num_batches_tracked" ) A_ = zero return new def __snake_case ( ): """simple docstring""" print(f'''{os.path.abspath(os.path.join(__UpperCamelCase ,os.pardir ) )}/demo.ipynb''' ) def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[int]="RGB" ): """simple docstring""" assert isinstance(__UpperCamelCase ,__UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): A_ = cva.imread(__UpperCamelCase ) else: A_ = get_image_from_url(__UpperCamelCase ) assert img is not None, f'''could not connect to: {im}''' A_ = cva.cvtColor(__UpperCamelCase ,cva.COLOR_BGR2RGB ) if input_format == "RGB": A_ = img[:, :, ::-1] return img def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[str]=1 ): """simple docstring""" return (images[i : i + batch] for i in range(0 ,len(__UpperCamelCase ) ,__UpperCamelCase ))
312
1
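# The caching logic above reduces to: take a lock per cache entry, stream the
# download to a temporary file, then atomically replace it into place. A
# minimal sketch under those assumptions; `download_to_cache` is an
# illustrative name, and only `requests` and `filelock` are assumed installed.
import os
import tempfile

import requests
from filelock import FileLock


def download_to_cache(url: str, cache_path: str) -> str:
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path):
            return cache_path  # another process finished while we waited
        with tempfile.NamedTemporaryFile(dir=os.path.dirname(cache_path) or ".", delete=False) as tmp:
            response = requests.get(url, stream=True)
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:  # skip keep-alive chunks
                    tmp.write(chunk)
        os.replace(tmp.name, cache_path)  # readers never observe a partial file
    return cache_path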
import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : str ,__UpperCamelCase : Any ): """simple docstring""" if isinstance(__UpperCamelCase ,torch.Tensor ): return image elif isinstance(__UpperCamelCase ,PIL.Image.Image ): A_ = [image] if isinstance(image[0] ,PIL.Image.Image ): A_ = [np.array(i.resize((w, h) ,resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image] A_ = np.concatenate(__UpperCamelCase ,axis=0 ) A_ = np.array(__UpperCamelCase ).astype(np.floataa ) / 255.0 A_ = image.transpose(0 ,3 ,1 ,2 ) A_ = 2.0 * image - 1.0 A_ = torch.from_numpy(__UpperCamelCase ) elif isinstance(image[0] ,torch.Tensor ): A_ = torch.cat(__UpperCamelCase ,dim=0 ) return image def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Any=0.9995 ): """simple docstring""" if not isinstance(__UpperCamelCase ,np.ndarray ): A_ = True A_ = va.device A_ = va.cpu().numpy() A_ = va.cpu().numpy() A_ = np.sum(va * va / (np.linalg.norm(__UpperCamelCase ) * np.linalg.norm(__UpperCamelCase )) ) if np.abs(__UpperCamelCase ) > DOT_THRESHOLD: A_ = (1 - t) * va + t * va else: A_ = np.arccos(__UpperCamelCase ) A_ = np.sin(__UpperCamelCase ) A_ = theta_a * t A_ = np.sin(__UpperCamelCase ) A_ = np.sin(theta_a - theta_t ) / sin_theta_a A_ = sin_theta_t / sin_theta_a A_ = sa * va + sa * va if inputs_are_torch: A_ = torch.from_numpy(__UpperCamelCase ).to(__UpperCamelCase ) return va def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : str ): """simple docstring""" A_ = F.normalize(__UpperCamelCase ,dim=-1 ) A_ = F.normalize(__UpperCamelCase ,dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ): """simple docstring""" for param in model.parameters(): A_ = value class _a ( snake_case_ ): """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase : AutoencoderKL , UpperCAmelCase : CLIPTextModel , UpperCAmelCase : CLIPModel , UpperCAmelCase : CLIPTokenizer , UpperCAmelCase : UNetaDConditionModel , UpperCAmelCase : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , UpperCAmelCase : CLIPFeatureExtractor , UpperCAmelCase : int=None , UpperCAmelCase : int=None , UpperCAmelCase : int=None , ): super().__init__() self.register_modules( vae=UpperCAmelCase , text_encoder=UpperCAmelCase , clip_model=UpperCAmelCase , tokenizer=UpperCAmelCase , unet=UpperCAmelCase , scheduler=UpperCAmelCase , feature_extractor=UpperCAmelCase , coca_model=UpperCAmelCase , coca_tokenizer=UpperCAmelCase , coca_transform=UpperCAmelCase , ) A_ = ( feature_extractor.size if isinstance(feature_extractor.size , UpperCAmelCase ) else feature_extractor.size["shortest_edge"] ) A_ = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , UpperCAmelCase ) 
set_requires_grad(self.clip_model , UpperCAmelCase ) def __A ( self : Union[str, Any] , UpperCAmelCase : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory A_ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCAmelCase ) def __A ( self : Union[str, Any] ): self.enable_attention_slicing(UpperCAmelCase ) def __A ( self : Optional[Any] ): set_requires_grad(self.vae , UpperCAmelCase ) def __A ( self : Optional[int] ): set_requires_grad(self.vae , UpperCAmelCase ) def __A ( self : Optional[Any] ): set_requires_grad(self.unet , UpperCAmelCase ) def __A ( self : Union[str, Any] ): set_requires_grad(self.unet , UpperCAmelCase ) def __A ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Any ): # get the original timestep using init_timestep A_ = min(int(num_inference_steps * strength ) , UpperCAmelCase ) A_ = max(num_inference_steps - init_timestep , 0 ) A_ = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def __A ( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any]=None ): if not isinstance(UpperCAmelCase , torch.Tensor ): raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(UpperCAmelCase )}''' ) A_ = image.to(device=UpperCAmelCase , dtype=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCAmelCase ) ] A_ = torch.cat(UpperCAmelCase , dim=0 ) else: A_ = self.vae.encode(UpperCAmelCase ).latent_dist.sample(UpperCAmelCase ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor A_ = 0.18_215 * init_latents A_ = init_latents.repeat_interleave(UpperCAmelCase , dim=0 ) A_ = randn_tensor(init_latents.shape , generator=UpperCAmelCase , device=UpperCAmelCase , dtype=UpperCAmelCase ) # get latents A_ = self.scheduler.add_noise(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) A_ = init_latents return latents def __A ( self : Optional[int] , UpperCAmelCase : Union[str, Any] ): A_ = self.coca_transform(UpperCAmelCase ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): A_ = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) A_ = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," ) def __A ( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int ): A_ = self.feature_extractor.preprocess(UpperCAmelCase ) A_ = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half() A_ = self.clip_model.get_image_features(UpperCAmelCase ) A_ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCAmelCase ) A_ = image_embeddings_clip.repeat_interleave(UpperCAmelCase , dim=0 ) return image_embeddings_clip @torch.enable_grad() def __A ( self : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : int , ): A_ = latents.detach().requires_grad_() A_ = self.scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase ) # predict the noise residual A_ = 
self.unet(UpperCAmelCase , UpperCAmelCase , encoder_hidden_states=UpperCAmelCase ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): A_ = self.scheduler.alphas_cumprod[timestep] A_ = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A_ = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 A_ = torch.sqrt(UpperCAmelCase ) A_ = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , UpperCAmelCase ): A_ = self.scheduler.sigmas[index] A_ = latents - sigma * noise_pred else: raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor A_ = 1 / 0.18_215 * sample A_ = self.vae.decode(UpperCAmelCase ).sample A_ = (image / 2 + 0.5).clamp(0 , 1 ) A_ = transforms.Resize(self.feature_extractor_size )(UpperCAmelCase ) A_ = self.normalize(UpperCAmelCase ).to(latents.dtype ) A_ = self.clip_model.get_image_features(UpperCAmelCase ) A_ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCAmelCase ) A_ = spherical_dist_loss(UpperCAmelCase , UpperCAmelCase ).mean() * clip_guidance_scale A_ = -torch.autograd.grad(UpperCAmelCase , UpperCAmelCase )[0] if isinstance(self.scheduler , UpperCAmelCase ): A_ = latents.detach() + grads * (sigma**2) A_ = noise_pred_original else: A_ = noise_pred_original - torch.sqrt(UpperCAmelCase ) * grads return noise_pred, latents @torch.no_grad() def __call__( self : Union[str, Any] , UpperCAmelCase : Union[torch.FloatTensor, PIL.Image.Image] , UpperCAmelCase : Union[torch.FloatTensor, PIL.Image.Image] , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[int] = 512 , UpperCAmelCase : Optional[int] = 512 , UpperCAmelCase : float = 0.6 , UpperCAmelCase : Optional[int] = 50 , UpperCAmelCase : Optional[float] = 7.5 , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[float] = 100 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , UpperCAmelCase : float = 0.8 , UpperCAmelCase : float = 0.1 , UpperCAmelCase : float = 0.1 , ): if isinstance(UpperCAmelCase , UpperCAmelCase ) and len(UpperCAmelCase ) != batch_size: raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(UpperCAmelCase )} generators.''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if isinstance(UpperCAmelCase , torch.Generator ) and batch_size > 1: A_ = [generator] + [None] * (batch_size - 1) A_ = [ ("model", self.coca_model is None), ("tokenizer", self.coca_tokenizer is None), ("transform", self.coca_transform is None), ] A_ = [x[0] for x in coca_is_none if x[1]] A_ = ", ".join(UpperCAmelCase ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(UpperCAmelCase ): raise ValueError( f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.''' f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' ) A_ = self.get_image_description(UpperCAmelCase ) if style_prompt is None: if len(UpperCAmelCase ): raise ValueError( f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.''' f''' Set prompt or pass Coca [{coca_is_none_str}] to 
DiffusionPipeline.''' ) A_ = self.get_image_description(UpperCAmelCase ) # get prompt text embeddings for content and style A_ = self.tokenizer( UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=UpperCAmelCase , return_tensors="pt" , ) A_ = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] A_ = self.tokenizer( UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=UpperCAmelCase , return_tensors="pt" , ) A_ = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] A_ = slerp(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # duplicate text embeddings for each generation per prompt A_ = text_embeddings.repeat_interleave(UpperCAmelCase , dim=0 ) # set timesteps A_ = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) A_ = {} if accepts_offset: A_ = 1 self.scheduler.set_timesteps(UpperCAmelCase , **UpperCAmelCase ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) A_ , A_ = self.get_timesteps(UpperCAmelCase , UpperCAmelCase , self.device ) A_ = timesteps[:1].repeat(UpperCAmelCase ) # Preprocess image A_ = preprocess(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) A_ = self.prepare_latents( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , text_embeddings.dtype , self.device , UpperCAmelCase ) A_ = preprocess(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) A_ = self.prepare_latents( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , text_embeddings.dtype , self.device , UpperCAmelCase ) A_ = slerp(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) if clip_guidance_scale > 0: A_ = self.get_clip_image_embeddings(UpperCAmelCase , UpperCAmelCase ) A_ = self.get_clip_image_embeddings(UpperCAmelCase , UpperCAmelCase ) A_ = slerp( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. A_ = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: A_ = content_text_input.input_ids.shape[-1] A_ = self.tokenizer([""] , padding="max_length" , max_length=UpperCAmelCase , return_tensors="pt" ) A_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt A_ = uncond_embeddings.repeat_interleave(UpperCAmelCase , dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes A_ = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
A_ = (batch_size, self.unet.config.in_channels, height // 8, width // 8) A_ = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps A_ = torch.randn(UpperCAmelCase , generator=UpperCAmelCase , device="cpu" , dtype=UpperCAmelCase ).to( self.device ) else: A_ = torch.randn(UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=UpperCAmelCase ) else: if latents.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) A_ = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler A_ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] A_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) A_ = {} if accepts_eta: A_ = eta # check if the scheduler accepts generator A_ = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: A_ = generator with self.progress_bar(total=UpperCAmelCase ): for i, t in enumerate(UpperCAmelCase ): # expand the latents if we are doing classifier free guidance A_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A_ = self.scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase ) # predict the noise residual A_ = self.unet(UpperCAmelCase , UpperCAmelCase , encoder_hidden_states=UpperCAmelCase ).sample # perform classifier free guidance if do_classifier_free_guidance: A_ , A_ = noise_pred.chunk(2 ) A_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: A_ = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) A_ , A_ = self.cond_fn( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) # compute the previous noisy sample x_t -> x_t-1 A_ = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor A_ = 1 / 0.18_215 * latents A_ = self.vae.decode(UpperCAmelCase ).sample A_ = (image / 2 + 0.5).clamp(0 , 1 ) A_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": A_ = self.numpy_to_pil(UpperCAmelCase ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=UpperCAmelCase , nsfw_content_detected=UpperCAmelCase )
312
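# The `slerp` helper above interpolates along the arc between two embeddings,
# falling back to plain lerp when they are nearly collinear. A NumPy-only
# sketch of the same math:
import numpy as np


def slerp(t, v0, v1, dot_threshold=0.9995):
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > dot_threshold:  # nearly parallel: lerp is numerically safer
        return (1 - t) * v0 + t * v1
    theta_0 = np.arccos(dot)
    theta_t = theta_0 * t
    s0 = np.sin(theta_0 - theta_t) / np.sin(theta_0)
    s1 = np.sin(theta_t) / np.sin(theta_0)
    return s0 * v0 + s1 * v1


print(slerp(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0])))  # ~[0.7071 0.7071]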
from __future__ import annotations


def __snake_case ( __UpperCamelCase : list[list[int]] ):
    """simple docstring"""
    # preprocessing the first row
    for i in range(1 ,len(__UpperCamelCase[0] ) ):
        __UpperCamelCase[0][i] += __UpperCamelCase[0][i - 1]
    # preprocessing the first column
    for i in range(1 ,len(__UpperCamelCase ) ):
        __UpperCamelCase[i][0] += __UpperCamelCase[i - 1][0]
    # updating the path cost for current position
    for i in range(1 ,len(__UpperCamelCase ) ):
        for j in range(1 ,len(__UpperCamelCase[0] ) ):
            __UpperCamelCase[i][j] += min(__UpperCamelCase[i - 1][j] ,__UpperCamelCase[i][j - 1] )
    return __UpperCamelCase[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
312
1
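# A standalone worked example of the dynamic program above on a classic 3x3
# grid; the cheapest right/down path is 1 -> 3 -> 1 -> 1 -> 1 with cost 7.
grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
for j in range(1, 3):
    grid[0][j] += grid[0][j - 1]  # prefix sums along the first row
for i in range(1, 3):
    grid[i][0] += grid[i - 1][0]  # prefix sums down the first column
for i in range(1, 3):
    for j in range(1, 3):
        grid[i][j] += min(grid[i - 1][j], grid[i][j - 1])
print(grid[-1][-1])  # 7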
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() __a :Union[str, Any] = logging.get_logger(__name__) __a :List[Any] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'encoder.layer_norm_for_extract': 'layer_norm_for_extract', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'label_embs_concat': 'label_embeddings_concat', 'mask_emb': 'masked_spec_embed', 'spk_proj': 'speaker_proj', } __a :str = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'label_embeddings_concat', 'speaker_proj', 'layer_norm_for_extract', ] def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Dict ,__UpperCamelCase : Tuple ,__UpperCamelCase : Optional[int] ): """simple docstring""" for attribute in key.split("." ): A_ = getattr(__UpperCamelCase ,__UpperCamelCase ) if weight_type is not None: A_ = getattr(__UpperCamelCase ,__UpperCamelCase ).shape else: A_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": A_ = value elif weight_type == "weight_g": A_ = value elif weight_type == "weight_v": A_ = value elif weight_type == "bias": A_ = value else: A_ = value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : List[str] ): """simple docstring""" A_ = [] A_ = fairseq_model.state_dict() A_ = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): A_ = False if "conv_layers" in name: load_conv_layer( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,hf_model.config.feat_extract_norm == "group" ,) A_ = True else: for key, mapped_key in MAPPING.items(): A_ = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key): # special case since naming is very similar continue A_ = True if "*" in mapped_key: A_ = name.split(__UpperCamelCase )[0].split("." 
)[-2] A_ = mapped_key.replace("*" ,__UpperCamelCase ) if "weight_g" in name: A_ = "weight_g" elif "weight_v" in name: A_ = "weight_v" elif "bias" in name: A_ = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj A_ = "weight" else: A_ = None set_recursively(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) continue if not is_used: unused_weights.append(__UpperCamelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : int ,__UpperCamelCase : Union[str, Any] ): """simple docstring""" A_ = full_name.split("conv_layers." )[-1] A_ = name.split("." ) A_ = int(items[0] ) A_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) A_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__UpperCamelCase ) @torch.no_grad() def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Tuple ,__UpperCamelCase : str=None ,__UpperCamelCase : Optional[Any]=None ,__UpperCamelCase : int=True ): """simple docstring""" if config_path is not None: A_ = UniSpeechSatConfig.from_pretrained(__UpperCamelCase ) else: A_ = UniSpeechSatConfig() A_ = "" if is_finetuned: A_ = UniSpeechSatForCTC(__UpperCamelCase ) else: A_ = UniSpeechSatForPreTraining(__UpperCamelCase ) A_ , A_ , A_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) A_ = model[0].eval() recursively_load_weights(__UpperCamelCase ,__UpperCamelCase ) hf_wavavec.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :int = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', 
default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) __a :Tuple = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
312
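# The weight-loading code above (set_recursively) walks dotted keys such as
# "encoder.layers.*.attention.k_proj" one attribute at a time. A minimal
# sketch of that traversal; SimpleNamespace stands in for real modules.
from types import SimpleNamespace


def set_by_dotted_key(root, key, value):
    *parents, leaf = key.split(".")
    pointer = root
    for attribute in parents:
        pointer = getattr(pointer, attribute)
    setattr(pointer, leaf, value)


model = SimpleNamespace(encoder=SimpleNamespace(proj=SimpleNamespace(weight=None)))
set_by_dotted_key(model, "encoder.proj.weight", [1.0, 2.0])
print(model.encoder.proj.weight)  # [1.0, 2.0]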
from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging __a :int = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class _a ( snake_case_ ): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : int = 101 ): A_ = length def __len__( self : int ): return self.length def __getitem__( self : Optional[int] , UpperCAmelCase : Optional[int] ): return i class _a : """simple docstring""" def __call__( self : Any , UpperCAmelCase : Optional[Any] ): return {"input_ids": torch.tensor(UpperCAmelCase ), "labels": torch.tensor(UpperCAmelCase )} class _a ( nn.Module ): """simple docstring""" def __init__( self : int ): super().__init__() # Add some (unused) params otherwise DDP will complain. A_ = nn.Linear(120 , 80 ) def __A ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Tuple=None ): if labels is not None: return torch.tensor(0.0 , device=input_ids.device ), input_ids else: return input_ids class _a ( snake_case_ ): """simple docstring""" @require_torch_neuroncore def __A ( self : List[str] ): A_ = f'''--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py '''.split() A_ = self.get_auto_remove_tmp_dir() A_ = f'''--output_dir {output_dir}'''.split() A_ = ["torchrun"] + distributed_args + args execute_subprocess_async(UpperCAmelCase , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call class _a ( snake_case_ ): """simple docstring""" @require_torch_multi_gpu def __A ( self : List[str] ): A_ = f'''--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py '''.split() A_ = self.get_auto_remove_tmp_dir() A_ = f'''--output_dir {output_dir}'''.split() A_ = ["torchrun"] + distributed_args + args execute_subprocess_async(UpperCAmelCase , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py __a :Union[str, Any] = HfArgumentParser((TrainingArguments,)) __a :Tuple = parser.parse_args_into_dataclasses()[0] logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " F"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}" ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. 
(this is crucial for prediction for instance) for dataset_length in [101, 40, 7]: __a :int = DummyDataset(dataset_length) def __snake_case ( __UpperCamelCase : EvalPrediction ): """simple docstring""" A_ = list(range(len(__UpperCamelCase ) ) ) A_ = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( "Predictions and/or labels do not match expected results:\n - predictions: " f'''{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}''' ) return {"success": success} __a :str = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) __a :str = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) __a :str = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) __a :Optional[int] = 2 __a :List[Any] = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) __a :str = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) __a :Union[str, Any] = None
312
1
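# The distributed test above verifies one invariant: predictions gathered
# across ranks come back as the unbroken, ordered sequence 0..N-1. The
# essence of that check on plain lists:
def all_samples_in_order(predictions):
    return predictions == list(range(len(predictions)))


print(all_samples_in_order([0, 1, 2, 3]))  # True
print(all_samples_in_order([0, 1, 3, 2]))  # False: ordering lost in the gather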
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __a :Any = { 'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'], 'processing_mgp_str': ['MgpstrProcessor'], 'tokenization_mgp_str': ['MgpstrTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Optional[Any] = [ 'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST', 'MgpstrModel', 'MgpstrPreTrainedModel', 'MgpstrForSceneTextRecognition', ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys __a :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
312
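# `_LazyModule` above defers the heavy torch imports until an attribute is
# first accessed. The snippet below is not the transformers implementation,
# only a generic sketch of the same idea using PEP 562 module-level
# __getattr__; "heavy_module" is a placeholder name.
import importlib

_LAZY_ATTRS = {"HeavyClass": "heavy_module"}


def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")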
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __a :Any = logging.get_logger(__name__) def __snake_case ( __UpperCamelCase : Optional[int] ): """simple docstring""" A_ = "huggingface/label-files" A_ = "imagenet-1k-id2label.json" A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) ) A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} A_ = {v: k for k, v in idalabel.items()} A_ = "std_conv" if "bit" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" A_ = BitConfig( conv_layer=__UpperCamelCase ,num_labels=1000 ,idalabel=__UpperCamelCase ,labelaid=__UpperCamelCase ,) return config def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" if "stem.conv" in name: A_ = name.replace("stem.conv" ,"bit.embedder.convolution" ) if "blocks" in name: A_ = name.replace("blocks" ,"layers" ) if "head.fc" in name: A_ = name.replace("head.fc" ,"classifier.1" ) if name.startswith("norm" ): A_ = "bit." + name if "bit" not in name and "classifier" not in name: A_ = "bit.encoder." + name return name def __snake_case ( ): """simple docstring""" A_ = "http://images.cocodataset.org/val2017/000000039769.jpg" A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Tuple=False ): """simple docstring""" A_ = get_config(__UpperCamelCase ) # load original model from timm A_ = create_model(__UpperCamelCase ,pretrained=__UpperCamelCase ) timm_model.eval() # load state_dict of original model A_ = timm_model.state_dict() for key in state_dict.copy().keys(): A_ = state_dict.pop(__UpperCamelCase ) A_ = val.squeeze() if "head" in key else val # load HuggingFace model A_ = BitForImageClassification(__UpperCamelCase ) model.eval() model.load_state_dict(__UpperCamelCase ) # create image processor A_ = create_transform(**resolve_data_config({} ,model=__UpperCamelCase ) ) A_ = transform.transforms A_ = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } A_ = BitImageProcessor( do_resize=__UpperCamelCase ,size={"shortest_edge": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=__UpperCamelCase ,crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} ,do_normalize=__UpperCamelCase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,) A_ = prepare_img() A_ = transform(__UpperCamelCase ).unsqueeze(0 ) A_ = processor(__UpperCamelCase ,return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(__UpperCamelCase ,__UpperCamelCase ) # verify logits with torch.no_grad(): A_ = model(__UpperCamelCase ) A_ = outputs.logits print("Logits:" ,logits[0, :3] ) print("Predicted class:" 
,model.config.idalabel[logits.argmax(-1 ).item()] ) A_ = timm_model(__UpperCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__UpperCamelCase ,outputs.logits ,atol=1E-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(__UpperCamelCase ) processor.save_pretrained(__UpperCamelCase ) if push_to_hub: print(f'''Pushing model {model_name} and processor to the hub''' ) model.push_to_hub(f'''ybelkada/{model_name}''' ) processor.push_to_hub(f'''ybelkada/{model_name}''' ) if __name__ == "__main__": __a :List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='resnetv2_50x1_bitm', type=str, help='Name of the BiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model to the hub.', ) __a :str = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
312
1
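# JSON object keys are always strings, hence the `{int(k): v ...}` conversion
# when loading imagenet-1k-id2label.json above. A small sketch with an inline
# dict standing in for the downloaded file:
import json

raw = json.loads('{"0": "tench", "1": "goldfish"}')
id2label = {int(k): v for k, v in raw.items()}
label2id = {v: k for k, v in id2label.items()}
print(id2label[0], label2id["goldfish"])  # tench 1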
from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 __a :str = { # 1536-bit 5: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 2048-bit 14: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AACAA68FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 3072-bit 15: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64' + 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7' + 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B' + 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31' + '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 4096-bit 16: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64' + 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7' + 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B' + 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31' + '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7' + '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA' + '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6' + '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED' + '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9' + '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199' + 'FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 6144-bit 17: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08' + 
'8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B' + '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9' + 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6' + '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8' + 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C' + '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718' + '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D' + '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D' + 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226' + '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC' + 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26' + '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB' + '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2' + '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127' + 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492' + '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406' + 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918' + 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151' + '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03' + 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F' + 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA' + 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B' + 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632' + '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E' + '6DCC4024FFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, # 8192-bit 18: { 'prime': int( 'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1' + '29024E088A67CC74020BBEA63B139B22514A08798E3404DD' + 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245' + 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED' + 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D' + 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F' + '83655D23DCA3AD961C62F356208552BB9ED529077096966D' + '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B' + 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9' + 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510' + '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64' + 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7' + 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B' + 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C' + 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31' + '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7' + '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA' + '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6' + '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED' + '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9' + '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492' + '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD' + 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831' + '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B' + 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF' + '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6' + 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3' + '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA' + 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328' + '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C' + 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE' + '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4' + '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300' + '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568' + '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9' + 
'22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B' + '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A' + '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36' + '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1' + 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92' + '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47' + '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71' + '60C980DD98EDD3DFFFFFFFFFFFFFFFFF', base=16, ), 'generator': 2, }, } class _a : """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : int = 14 ): if group not in primes: raise ValueError("Unsupported Group" ) A_ = primes[group]["prime"] A_ = primes[group]["generator"] A_ = int(hexlify(urandom(32 ) ) , base=16 ) def __A ( self : List[str] ): return hex(self.__private_key )[2:] def __A ( self : str ): A_ = pow(self.generator , self.__private_key , self.prime ) return hex(UpperCAmelCase )[2:] def __A ( self : Optional[Any] , UpperCAmelCase : int ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= key <= self.prime - 2 and pow(UpperCAmelCase , (self.prime - 1) // 2 , self.prime ) == 1 ) def __A ( self : Tuple , UpperCAmelCase : str ): A_ = int(UpperCAmelCase , base=16 ) if not self.is_valid_public_key(UpperCAmelCase ): raise ValueError("Invalid public key" ) A_ = pow(UpperCAmelCase , self.__private_key , self.prime ) return shaaaa(str(UpperCAmelCase ).encode() ).hexdigest() @staticmethod def __A ( UpperCAmelCase : int , UpperCAmelCase : int ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= remote_public_key_str <= prime - 2 and pow(UpperCAmelCase , (prime - 1) // 2 , UpperCAmelCase ) == 1 ) @staticmethod def __A ( UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : int = 14 ): A_ = int(UpperCAmelCase , base=16 ) A_ = int(UpperCAmelCase , base=16 ) A_ = primes[group]["prime"] if not DiffieHellman.is_valid_public_key_static(UpperCAmelCase , UpperCAmelCase ): raise ValueError("Invalid public key" ) A_ = pow(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return shaaaa(str(UpperCAmelCase ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
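# Usage sketch for the Diffie-Hellman helper above. This dump mangles
# identifiers (`shaaaa` stands for hashlib's sha256, and the class and its
# methods are renamed), so the names below assume the de-mangled upstream
# API and are illustrative rather than runnable against the code as printed.
alice = DiffieHellman(group=14)
bob = DiffieHellman(group=14)

# Each party publishes a public value; both derive the same SHA-256 digest.
alice_secret = alice.generate_shared_key(bob.generate_public_key())
bob_secret = bob.generate_shared_key(alice.generate_public_key())
assert alice_secret == bob_secret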
312
import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger __a :Dict = get_logger(__name__) __a :Union[str, Any] = Path(__file__).parent / 'model_card_template.md' __a :Tuple = uuida().hex __a :List[Any] = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES __a :Union[str, Any] = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES __a :Tuple = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/' def __snake_case ( __UpperCamelCase : Union[Dict, str, None] = None ): """simple docstring""" A_ = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}''' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'''; torch/{_torch_version}''' if is_flax_available(): ua += f'''; jax/{_jax_version}''' ua += f'''; flax/{_flax_version}''' if is_onnx_available(): ua += f'''; onnxruntime/{_onnxruntime_version}''' # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" ,"" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(__UpperCamelCase ,__UpperCamelCase ): ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items() ) elif isinstance(__UpperCamelCase ,__UpperCamelCase ): ua += "; " + user_agent return ua def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[str] = None ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if token is None: A_ = HfFolder.get_token() if organization is None: A_ = whoami(__UpperCamelCase )["name"] return f'''{username}/{model_id}''' else: return f'''{organization}/{model_id}''' def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Union[str, Any] ): """simple docstring""" if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." 
) if hasattr(__UpperCamelCase ,"local_rank" ) and args.local_rank not in [-1, 0]: return A_ = args.hub_token if hasattr(__UpperCamelCase ,"hub_token" ) else None A_ = get_full_repo_name(__UpperCamelCase ,token=__UpperCamelCase ) A_ = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" ,license="apache-2.0" ,library_name="diffusers" ,tags=[] ,datasets=args.dataset_name ,metrics=[] ,) ,template_path=__UpperCamelCase ,model_name=__UpperCamelCase ,repo_name=__UpperCamelCase ,dataset_name=args.dataset_name if hasattr(__UpperCamelCase ,"dataset_name" ) else None ,learning_rate=args.learning_rate ,train_batch_size=args.train_batch_size ,eval_batch_size=args.eval_batch_size ,gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(__UpperCamelCase ,"gradient_accumulation_steps" ) else None ) ,adam_betaa=args.adam_betaa if hasattr(__UpperCamelCase ,"adam_beta1" ) else None ,adam_betaa=args.adam_betaa if hasattr(__UpperCamelCase ,"adam_beta2" ) else None ,adam_weight_decay=args.adam_weight_decay if hasattr(__UpperCamelCase ,"adam_weight_decay" ) else None ,adam_epsilon=args.adam_epsilon if hasattr(__UpperCamelCase ,"adam_epsilon" ) else None ,lr_scheduler=args.lr_scheduler if hasattr(__UpperCamelCase ,"lr_scheduler" ) else None ,lr_warmup_steps=args.lr_warmup_steps if hasattr(__UpperCamelCase ,"lr_warmup_steps" ) else None ,ema_inv_gamma=args.ema_inv_gamma if hasattr(__UpperCamelCase ,"ema_inv_gamma" ) else None ,ema_power=args.ema_power if hasattr(__UpperCamelCase ,"ema_power" ) else None ,ema_max_decay=args.ema_max_decay if hasattr(__UpperCamelCase ,"ema_max_decay" ) else None ,mixed_precision=args.mixed_precision ,) A_ = os.path.join(args.output_dir ,"README.md" ) model_card.save(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Optional[str] ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if resolved_file is None or commit_hash is not None: return commit_hash A_ = str(Path(__UpperCamelCase ).as_posix() ) A_ = re.search(R"snapshots/([^/]+)/" ,__UpperCamelCase ) if search is None: return None A_ = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(__UpperCamelCase ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. __a :str = os.path.expanduser( os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface')) ) __a :List[Any] = os.path.join(hf_cache_home, 'diffusers') def __snake_case ( __UpperCamelCase : Optional[str] = None ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if new_cache_dir is None: A_ = DIFFUSERS_CACHE if old_cache_dir is None: A_ = old_diffusers_cache A_ = Path(__UpperCamelCase ).expanduser() A_ = Path(__UpperCamelCase ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): A_ = new_cache_dir / old_blob_path.relative_to(__UpperCamelCase ) new_blob_path.parent.mkdir(parents=__UpperCamelCase ,exist_ok=__UpperCamelCase ) os.replace(__UpperCamelCase ,__UpperCamelCase ) try: os.symlink(__UpperCamelCase ,__UpperCamelCase ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. 
If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). __a :Dict = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt') if not os.path.isfile(cache_version_file): __a :Optional[int] = 0 else: with open(cache_version_file) as f: try: __a :Dict = int(f.read()) except ValueError: __a :str = 0 if cache_version < 1: __a :Optional[Any] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( 'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your ' 'existing cached models. This is a one-time operation, you can interrupt it or run it ' 'later by calling `diffusers.utils.hub_utils.move_cache()`.' ) try: move_cache() except Exception as e: __a :Optional[Any] = '\n'.join(traceback.format_tb(e.__traceback__)) logger.error( F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease " 'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole ' 'message and we will do our best to help.' ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, 'w') as f: f.write('1') except Exception: logger.warning( F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure " 'the directory exists and can be written to.' ) def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if variant is not None: A_ = weights_name.split("." ) A_ = splits[:-1] + [variant] + splits[-1:] A_ = ".".join(__UpperCamelCase ) return weights_name def __snake_case ( __UpperCamelCase : Optional[Any] ,*, __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Any ,__UpperCamelCase : Tuple ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : str ,__UpperCamelCase : int ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : int ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Optional[int]=None ,): """simple docstring""" A_ = str(__UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): return pretrained_model_name_or_path elif os.path.isdir(__UpperCamelCase ): if os.path.isfile(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ): # Load from a PyTorch checkpoint A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ): A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) return model_file else: raise EnvironmentError( f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' ) else: # 1. 
First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(__UpperCamelCase ).base_version ) >= version.parse("0.20.0" ) ): try: A_ = hf_hub_download( __UpperCamelCase ,filename=_add_variant(__UpperCamelCase ,__UpperCamelCase ) ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,local_files_only=__UpperCamelCase ,use_auth_token=__UpperCamelCase ,user_agent=__UpperCamelCase ,subfolder=__UpperCamelCase ,revision=revision or commit_hash ,) warnings.warn( f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' ,__UpperCamelCase ,) return model_file except: # noqa: E722 warnings.warn( f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__UpperCamelCase ,__UpperCamelCase )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(__UpperCamelCase ,__UpperCamelCase )}\' so that the correct variant file can be added.''' ,__UpperCamelCase ,) try: # 2. Load model file as usual A_ = hf_hub_download( __UpperCamelCase ,filename=__UpperCamelCase ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,local_files_only=__UpperCamelCase ,use_auth_token=__UpperCamelCase ,user_agent=__UpperCamelCase ,subfolder=__UpperCamelCase ,revision=revision or commit_hash ,) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ''' "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ''' "this model name. 
Check the model page at " f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' ) except EntryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' ) except HTTPError as err: raise EnvironmentError( f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' ) except ValueError: raise EnvironmentError( f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it''' f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a''' f''' directory containing a file named {weights_name} or''' " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ''' "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ''' f'''containing a file named {weights_name}''' )
312
1
from __future__ import annotations


def min_path_sum(matrix: list[list[int]]) -> int:
    """Return the minimum cost of a path from the top-left to the
    bottom-right corner of ``matrix``, moving only right or down.
    The matrix is updated in place with cumulative path costs."""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for the current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
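# Worked example for min_path_sum above: moving only right or down, the
# cheapest route through this grid is 1 -> 3 -> 1 -> 1 -> 1, costing 7.
# Note that the grid is overwritten with cumulative costs.
grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
assert min_path_sum(grid) == 7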
312
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __a :Any = { 'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'], 'processing_mgp_str': ['MgpstrProcessor'], 'tokenization_mgp_str': ['MgpstrTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Optional[Any] = [ 'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST', 'MgpstrModel', 'MgpstrPreTrainedModel', 'MgpstrForSceneTextRecognition', ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys __a :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
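# The `_LazyModule` registration above keeps `import transformers` cheap: a
# top-level import only triggers the actual submodule import on first
# attribute access, e.g.:
from transformers import MgpstrProcessor  # resolves `processing_mgp_str` lazily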
312
1
def odd_even_transposition(arr: list) -> list:
    """Sort ``arr`` in place with odd-even transposition (brick) sort and
    return it. The list is fully sorted after at most ``len(arr)`` passes."""
    arr_size = len(arr)
    for pass_number in range(arr_size):
        # alternate between even- and odd-indexed neighbour pairs
        for i in range(pass_number % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
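# Sanity check: the alternating passes match Python's built-in sort.
data = [5, 3, 8, 1, 9, 2]
assert odd_even_transposition(data) == sorted([5, 3, 8, 1, 9, 2])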
312
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:
    """Return True if ``string`` can be segmented into a sequence of words
    taken from ``words`` (words may be reused)."""
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
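# Examples for word_break above: words may be reused, and the trie plus the
# memoised recursion keeps the check polynomial in len(string).
assert word_break("applepenapple", ["apple", "pen"]) is True
assert word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False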
312
1
from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide ``number_of_bytes`` into ``partitions`` contiguous byte
    ranges, returned as strings of the form "start-end"."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
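# Example: 16 bytes split across 4 downloaders; the last partition absorbs
# any remainder so every byte is covered exactly once.
assert allocation_num(16, 4) == ["1-4", "5-8", "9-12", "13-16"]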
312
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer __a :List[str] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __a :Union[str, Any] = { 'vocab_file': { 'google/electra-small-generator': ( 'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt' ), 'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt', 'google/electra-large-generator': ( 'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt' ), 'google/electra-small-discriminator': ( 'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt' ), 'google/electra-base-discriminator': ( 'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt' ), 'google/electra-large-discriminator': ( 'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'google/electra-small-generator': ( 'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json' ), 'google/electra-base-generator': ( 'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json' ), 'google/electra-large-generator': ( 'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json' ), 'google/electra-small-discriminator': ( 'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json' ), 'google/electra-base-discriminator': ( 'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json' ), 'google/electra-large-discriminator': ( 'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json' ), }, } __a :Optional[int] = { 'google/electra-small-generator': 512, 'google/electra-base-generator': 512, 'google/electra-large-generator': 512, 'google/electra-small-discriminator': 512, 'google/electra-base-discriminator': 512, 'google/electra-large-discriminator': 512, } __a :str = { 'google/electra-small-generator': {'do_lower_case': True}, 'google/electra-base-generator': {'do_lower_case': True}, 'google/electra-large-generator': {'do_lower_case': True}, 'google/electra-small-discriminator': {'do_lower_case': True}, 'google/electra-base-discriminator': {'do_lower_case': True}, 'google/electra-large-discriminator': {'do_lower_case': True}, } class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Tuple = VOCAB_FILES_NAMES _lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION _lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase : int = ElectraTokenizer def __init__( self : Tuple , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Any=True , UpperCAmelCase : Any="[UNK]" , UpperCAmelCase : Union[str, Any]="[SEP]" , UpperCAmelCase : List[Any]="[PAD]" , UpperCAmelCase : Union[str, Any]="[CLS]" , UpperCAmelCase : List[Any]="[MASK]" , UpperCAmelCase : List[str]=True , UpperCAmelCase : Any=None , **UpperCAmelCase : Union[str, Any] , ): super().__init__( UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , ) A_ = 
json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars ): A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) ) A_ = do_lower_case A_ = strip_accents A_ = tokenize_chinese_chars A_ = normalizer_class(**UpperCAmelCase ) A_ = do_lower_case def __A ( self : int , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any]=None ): A_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __A ( self : Union[str, Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ): A_ = [self.sep_token_id] A_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ): A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase ) return tuple(UpperCAmelCase )
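# Hedged usage sketch: the fast tokenizer class above is mangled to `_a`;
# assuming its upstream name ElectraTokenizerFast, typical use looks like:
tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
print(tok("ELECTRA reuses BERT's WordPiece vocabulary")["input_ids"])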
312
1
import logging import os import threading import time try: import warnings except ImportError: __a :Optional[Any] = None try: import msvcrt except ImportError: __a :List[Any] = None try: import fcntl except ImportError: __a :List[Any] = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: __a :List[str] = OSError # Data # ------------------------------------------------ __a :str = [ 'Timeout', 'BaseFileLock', 'WindowsFileLock', 'UnixFileLock', 'SoftFileLock', 'FileLock', ] __a :List[Any] = '3.0.12' __a :Optional[int] = None def __snake_case ( ): """simple docstring""" global _logger A_ = _logger or logging.getLogger(__name__ ) return _logger class _a ( snake_case_ ): """simple docstring""" def __init__( self : Any , UpperCAmelCase : List[str] ): A_ = lock_file return None def __str__( self : Dict ): A_ = f'''The file lock \'{self.lock_file}\' could not be acquired.''' return temp class _a : """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : List[str] ): A_ = lock return None def __enter__( self : List[str] ): return self.lock def __exit__( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ): self.lock.release() return None class _a : """simple docstring""" def __init__( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=-1 , UpperCAmelCase : int=None ): A_ = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long A_ = self.hash_filename_if_too_long(UpperCAmelCase , UpperCAmelCase ) # The path to the lock file. A_ = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. A_ = None # The default timeout value. A_ = timeout # We use this lock primarily for the lock counter. A_ = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. A_ = 0 return None @property def __A ( self : int ): return self._lock_file @property def __A ( self : Optional[int] ): return self._timeout @timeout.setter def __A ( self : List[Any] , UpperCAmelCase : Dict ): A_ = float(UpperCAmelCase ) return None def __A ( self : Union[str, Any] ): raise NotImplementedError() def __A ( self : Union[str, Any] ): raise NotImplementedError() @property def __A ( self : Optional[int] ): return self._lock_file_fd is not None def __A ( self : str , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : List[str]=0.05 ): # Use the default timeout, if no timeout is provided. if timeout is None: A_ = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 A_ = id(self ) A_ = self._lock_file A_ = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(f'''Attempting to acquire lock {lock_id} on {lock_filename}''' ) self._acquire() if self.is_locked: logger().debug(f'''Lock {lock_id} acquired on {lock_filename}''' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(f'''Timeout on acquiring lock {lock_id} on {lock_filename}''' ) raise Timeout(self._lock_file ) else: logger().debug( f'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' ) time.sleep(UpperCAmelCase ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: A_ = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def __A ( self : List[str] , UpperCAmelCase : Optional[Any]=False ): with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: A_ = id(self ) A_ = self._lock_file logger().debug(f'''Attempting to release lock {lock_id} on {lock_filename}''' ) self._release() A_ = 0 logger().debug(f'''Lock {lock_id} released on {lock_filename}''' ) return None def __enter__( self : List[Any] ): self.acquire() return self def __exit__( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple ): self.release() return None def __del__( self : Optional[Any] ): self.release(force=UpperCAmelCase ) return None def __A ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : int ): A_ = os.path.basename(UpperCAmelCase ) if len(UpperCAmelCase ) > max_length and max_length > 0: A_ = os.path.dirname(UpperCAmelCase ) A_ = str(hash(UpperCAmelCase ) ) A_ = filename[: max_length - len(UpperCAmelCase ) - 8] + "..." + hashed_filename + ".lock" return os.path.join(UpperCAmelCase , UpperCAmelCase ) else: return path class _a ( snake_case_ ): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int]=-1 , UpperCAmelCase : Dict=None ): from .file_utils import relative_to_absolute_path super().__init__(UpperCAmelCase , timeout=UpperCAmelCase , max_filename_length=UpperCAmelCase ) A_ = "\\\\?\\" + relative_to_absolute_path(self.lock_file ) def __A ( self : Optional[int] ): A_ = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: A_ = os.open(self._lock_file , UpperCAmelCase ) except OSError: pass else: try: msvcrt.locking(UpperCAmelCase , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(UpperCAmelCase ) else: A_ = fd return None def __A ( self : List[Any] ): A_ = self._lock_file_fd A_ = None msvcrt.locking(UpperCAmelCase , msvcrt.LK_UNLCK , 1 ) os.close(UpperCAmelCase ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class _a ( snake_case_ ): """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : List[str]=-1 , UpperCAmelCase : str=None ): A_ = os.statvfs(os.path.dirname(UpperCAmelCase ) ).f_namemax super().__init__(UpperCAmelCase , timeout=UpperCAmelCase , max_filename_length=UpperCAmelCase ) def __A ( self : Optional[int] ): A_ = os.O_RDWR | os.O_CREAT | os.O_TRUNC A_ = os.open(self._lock_file , UpperCAmelCase ) try: fcntl.flock(UpperCAmelCase , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(UpperCAmelCase ) else: A_ = fd return None def __A ( self : str ): # Do not remove the lockfile: # # https://github.com/benediktschmitt/py-filelock/issues/31 # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition A_ = self._lock_file_fd A_ = None fcntl.flock(UpperCAmelCase , fcntl.LOCK_UN ) os.close(UpperCAmelCase ) return None class _a ( snake_case_ ): """simple docstring""" def __A ( self : Optional[int] ): A_ = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: A_ = os.open(self._lock_file , UpperCAmelCase ) except OSError: pass else: A_ = fd return None def __A ( self : Optional[int] ): os.close(self._lock_file_fd ) A_ = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None __a :Tuple = None if msvcrt: __a :Tuple = WindowsFileLock elif fcntl: __a :Optional[Any] = UnixFileLock else: __a :str = SoftFileLock if warnings is not None: warnings.warn('only soft file lock is available')
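# Usage sketch for the lock classes above, written against the upstream
# names (`FileLock`, `Timeout`) that this dump mangles. Acquisition is
# re-entrant: the per-object counter lets nested `with` blocks share one
# OS-level lock, which is released only when the counter drops back to zero.
lock = FileLock("resource.txt.lock", timeout=1)
try:
    with lock:      # counter 0 -> 1, OS-level lock acquired
        with lock:  # counter 1 -> 2, no second OS-level acquire
            pass
except Timeout:
    print("another process holds resource.txt.lock")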
312
# flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter __a :Optional[Any] = logging.get_logger(__name__) __a :Dict[Optional[str], Type[Formatter]] = {} __a :Dict[Optional[str], str] = {} __a :Dict[Optional[str], Exception] = {} def __snake_case ( __UpperCamelCase : type ,__UpperCamelCase : Optional[str] ,__UpperCamelCase : Optional[List[str]] = None ,): """simple docstring""" A_ = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' ) A_ = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' ) A_ = format_type def __snake_case ( __UpperCamelCase : Exception ,__UpperCamelCase : Optional[str] ,__UpperCamelCase : Optional[List[str]] = None ): """simple docstring""" A_ = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): A_ = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['python']) _register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow']) _register_formatter(NumpyFormatter, 'numpy', aliases=['np']) _register_formatter(PandasFormatter, 'pandas', aliases=['pd']) _register_formatter(CustomFormatter, 'custom') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch']) else: __a :List[Any] = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.') _register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, 'tensorflow', aliases=['tf']) else: __a :List[str] = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.') _register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, 'jax', aliases=[]) else: __a :Tuple = ValueError('JAX needs to be installed to be able to return JAX arrays.') _register_unavailable_formatter(_jax_error, 'jax', aliases=[]) def __snake_case ( __UpperCamelCase : Optional[str] ): """simple docstring""" if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def __snake_case ( __UpperCamelCase : Optional[str] ,**__UpperCamelCase : List[Any] ): """simple docstring""" A_ = get_format_type_from_alias(__UpperCamelCase ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**__UpperCamelCase ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
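# Hedged sketch of how the registry above is consumed, using the upstream
# name (`get_formatter`) that this dump mangles. Aliases resolve first,
# then the formatter class is instantiated, or the stored unavailability
# error is re-raised.
formatter = get_formatter("np")  # alias "np" resolves to "numpy"
# get_formatter("torch") raises the stored ValueError if PyTorch is absent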
312
1
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class _a ( unittest.TestCase ): """simple docstring""" @property def __A ( self : Tuple ): torch.manual_seed(0 ) A_ = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model def __A ( self : str ): A_ = self.dummy_uncond_unet A_ = KarrasVeScheduler() A_ = KarrasVePipeline(unet=UpperCAmelCase , scheduler=UpperCAmelCase ) pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) A_ = torch.manual_seed(0 ) A_ = pipe(num_inference_steps=2 , generator=UpperCAmelCase , output_type="numpy" ).images A_ = torch.manual_seed(0 ) A_ = pipe(num_inference_steps=2 , generator=UpperCAmelCase , output_type="numpy" , return_dict=UpperCAmelCase )[0] A_ = image[0, -3:, -3:, -1] A_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) A_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : Tuple ): A_ = "google/ncsnpp-celebahq-256" A_ = UNetaDModel.from_pretrained(UpperCAmelCase ) A_ = KarrasVeScheduler() A_ = KarrasVePipeline(unet=UpperCAmelCase , scheduler=UpperCAmelCase ) pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) A_ = torch.manual_seed(0 ) A_ = pipe(num_inference_steps=20 , generator=UpperCAmelCase , output_type="numpy" ).images A_ = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) A_ = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
312
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __a :int = { 'configuration_mask2former': [ 'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Mask2FormerConfig', ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Union[str, Any] = ['Mask2FormerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Optional[Any] = [ 'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'Mask2FormerForUniversalSegmentation', 'Mask2FormerModel', 'Mask2FormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_maskaformer import MaskaFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskaformer import ( MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskaFormerForUniversalSegmentation, MaskaFormerModel, MaskaFormerPreTrainedModel, ) else: import sys __a :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
312
1
import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : Union[str, Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() def __A ( self : List[Any] ): A_ , A_ = FlaxControlNetModel.from_pretrained( "lllyasviel/sd-controlnet-canny" , from_pt=UpperCAmelCase , dtype=jnp.bfloataa ) A_ , A_ = FlaxStableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , controlnet=UpperCAmelCase , from_pt=UpperCAmelCase , dtype=jnp.bfloataa ) A_ = controlnet_params A_ = "bird" A_ = jax.device_count() A_ = pipe.prepare_text_inputs([prompts] * num_samples ) A_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) A_ = pipe.prepare_image_inputs([canny_image] * num_samples ) A_ = jax.random.PRNGKey(0 ) A_ = jax.random.split(UpperCAmelCase , jax.device_count() ) A_ = replicate(UpperCAmelCase ) A_ = shard(UpperCAmelCase ) A_ = shard(UpperCAmelCase ) A_ = pipe( prompt_ids=UpperCAmelCase , image=UpperCAmelCase , params=UpperCAmelCase , prng_seed=UpperCAmelCase , num_inference_steps=50 , jit=UpperCAmelCase , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) A_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) A_ = images[0, 253:256, 253:256, -1] A_ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) A_ = jnp.array( [0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] ) print(f'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def __A ( self : List[str] ): A_ , A_ = FlaxControlNetModel.from_pretrained( "lllyasviel/sd-controlnet-openpose" , from_pt=UpperCAmelCase , dtype=jnp.bfloataa ) A_ , A_ = FlaxStableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , controlnet=UpperCAmelCase , from_pt=UpperCAmelCase , dtype=jnp.bfloataa ) A_ = controlnet_params A_ = "Chef in the kitchen" A_ = jax.device_count() A_ = pipe.prepare_text_inputs([prompts] * num_samples ) A_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" ) A_ = pipe.prepare_image_inputs([pose_image] * num_samples ) A_ = jax.random.PRNGKey(0 ) A_ = jax.random.split(UpperCAmelCase , jax.device_count() ) A_ = replicate(UpperCAmelCase ) A_ = shard(UpperCAmelCase ) A_ = shard(UpperCAmelCase ) A_ = pipe( prompt_ids=UpperCAmelCase , image=UpperCAmelCase , params=UpperCAmelCase , prng_seed=UpperCAmelCase , num_inference_steps=50 , jit=UpperCAmelCase , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) A_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) A_ = images[0, 253:256, 253:256, -1] A_ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) A_ = jnp.array( [[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] ) print(f'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
312
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, ClassLabel, Features from .base import TaskTemplate @dataclass(frozen=snake_case_ ) class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} ) _lowerCamelCase : ClassVar[Features] = Features({'audio': Audio()} ) _lowerCamelCase : ClassVar[Features] = Features({'labels': ClassLabel} ) _lowerCamelCase : str = "audio" _lowerCamelCase : str = "labels" def __A ( self : str , UpperCAmelCase : List[Any] ): if self.label_column not in features: raise ValueError(f'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , UpperCAmelCase ): raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' ) A_ = copy.deepcopy(self ) A_ = self.label_schema.copy() A_ = features[self.label_column] A_ = label_schema return task_template @property def __A ( self : List[str] ): return { self.audio_column: "audio", self.label_column: "labels", }
312
1
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized version of a model using a pretrained config
    and the matching tokenizer."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
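# Hypothetical command-line use via fire (positional order: config name,
# then save directory; extra flags become config overrides):
#   python save_randomly_initialized_model.py t5-small t5_random --d_model=64
save_randomly_initialized_version("t5-small", "t5_random", d_model=64)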
312
def base16_encode(data: bytes) -> str:
    """Encode ``data`` as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
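# Round-trip example: base16 encoding is uppercase hex, two digits per byte.
payload = b"Hello World!"
encoded = base16_encode(payload)  # '48656C6C6F20576F726C6421'
assert base16_decode(encoded) == payload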
312
1
from ..utils import DummyObject, requires_backends


class _a(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
312
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k: empirically determined constant, valid values are 0.04 or 0.06
        window_size: size of the neighbourhood considered for each pixel
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        """Return the image with detected corners marked in red and the
        list of [x, y, response] corner candidates."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
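# Minimal end-to-end run on a synthetic image: a white square on a black
# background has four strong corners. The file name is illustrative.
img = np.zeros((64, 64), dtype=np.uint8)
img[16:48, 16:48] = 255
cv2.imwrite("square.png", img)

detector = HarrisCorner(0.04, 3)
color_img, corners = detector.detect("square.png")
print(f"found {len(corners)} corner candidates")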
312
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __a :Any = { 'configuration_groupvit': [ 'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GroupViTConfig', 'GroupViTOnnxConfig', 'GroupViTTextConfig', 'GroupViTVisionConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Dict = [ 'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GroupViTModel', 'GroupViTPreTrainedModel', 'GroupViTTextModel', 'GroupViTVisionModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :List[Any] = [ 'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFGroupViTModel', 'TFGroupViTPreTrainedModel', 'TFGroupViTTextModel', 'TFGroupViTVisionModel', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys __a :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
312
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
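# Quick check by hand: for n = 10 the terms a = 3..10 contribute
# 6, 8, 20, 24, 42, 48, 72 and 80, which sum to 300.
assert solution(10) == 300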
312
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __a :Optional[int] = logging.get_logger(__name__) __a :Dict = { 'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json', } class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Dict = 'roc_bert' def __init__( self : Any , UpperCAmelCase : Tuple=30522 , UpperCAmelCase : Union[str, Any]=768 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Any=12 , UpperCAmelCase : Optional[int]=3072 , UpperCAmelCase : int="gelu" , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Union[str, Any]=512 , UpperCAmelCase : Any=2 , UpperCAmelCase : Optional[int]=0.02 , UpperCAmelCase : str=1E-12 , UpperCAmelCase : List[str]=True , UpperCAmelCase : Dict=0 , UpperCAmelCase : Union[str, Any]="absolute" , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Any=True , UpperCAmelCase : List[str]=768 , UpperCAmelCase : Tuple=910 , UpperCAmelCase : List[str]=512 , UpperCAmelCase : int=24858 , UpperCAmelCase : Dict=True , **UpperCAmelCase : Union[str, Any] , ): A_ = vocab_size A_ = max_position_embeddings A_ = hidden_size A_ = num_hidden_layers A_ = num_attention_heads A_ = intermediate_size A_ = hidden_act A_ = hidden_dropout_prob A_ = attention_probs_dropout_prob A_ = initializer_range A_ = type_vocab_size A_ = layer_norm_eps A_ = use_cache A_ = enable_pronunciation A_ = enable_shape A_ = pronunciation_embed_dim A_ = pronunciation_vocab_size A_ = shape_embed_dim A_ = shape_vocab_size A_ = concat_input A_ = position_embedding_type A_ = classifier_dropout super().__init__(pad_token_id=UpperCAmelCase , **UpperCAmelCase )
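# Hedged usage sketch assuming the upstream class name RoCBertConfig for the
# mangled `_a` above; shape and pronunciation embeddings are the two
# RoCBert-specific additions over a standard BERT config.
config = RoCBertConfig(enable_shape=True, enable_pronunciation=True)
print(config.hidden_size, config.shape_embed_dim)  # 768 512 by default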
312
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Union[str, Any] = ['image_processor', 'tokenizer'] _lowerCamelCase : Tuple = 'OwlViTImageProcessor' _lowerCamelCase : List[Any] = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self : Optional[Any] , UpperCAmelCase : int=None , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : Any ): A_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCAmelCase , ) A_ = kwargs.pop("feature_extractor" ) A_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(UpperCAmelCase , UpperCAmelCase ) def __call__( self : Optional[int] , UpperCAmelCase : List[str]=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Dict="max_length" , UpperCAmelCase : Optional[Any]="np" , **UpperCAmelCase : Optional[int] ): if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." ) if text is not None: if isinstance(UpperCAmelCase , UpperCAmelCase ) or (isinstance(UpperCAmelCase , UpperCAmelCase ) and not isinstance(text[0] , UpperCAmelCase )): A_ = [self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )] elif isinstance(UpperCAmelCase , UpperCAmelCase ) and isinstance(text[0] , UpperCAmelCase ): A_ = [] # Maximum number of queries across batch A_ = max([len(UpperCAmelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(UpperCAmelCase ) != max_num_queries: A_ = t + [" "] * (max_num_queries - len(UpperCAmelCase )) A_ = self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ) encodings.append(UpperCAmelCase ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": A_ = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) A_ = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp A_ = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) A_ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch A_ = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 ) A_ = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf A_ = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 ) A_ = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 ) else: raise ValueError("Target return tensor type could not be returned" ) A_ = BatchEncoding() A_ = input_ids A_ = attention_mask if query_images is not None: A_ = BatchEncoding() A_ = self.image_processor( 
UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ).pixel_values A_ = query_pixel_values if images is not None: A_ = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ) if text is not None and images is not None: A_ = image_features.pixel_values return encoding elif query_images is not None and images is not None: A_ = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase ) def __A ( self : Optional[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : List[Any] ): return self.image_processor.post_process(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : str , *UpperCAmelCase : str , **UpperCAmelCase : Union[str, Any] ): return self.image_processor.post_process_object_detection(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : List[Any] , *UpperCAmelCase : int , **UpperCAmelCase : int ): return self.image_processor.post_process_image_guided_detection(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : List[Any] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Any ): return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : Tuple , *UpperCAmelCase : Dict , **UpperCAmelCase : str ): return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase ) @property def __A ( self : Union[str, Any] ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase , ) return self.image_processor_class @property def __A ( self : Optional[Any] ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase , ) return self.image_processor
312
1
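A minimal usage sketch for the configuration class in the row above; it assumes the `transformers` package is installed and that the de-obfuscated class name is `RoCBertConfig` (as the archive map suggests), so treat the import as an assumption rather than guaranteed API.

from transformers import RoCBertConfig  # assumed de-obfuscated name

# Instantiate with the defaults visible in the __init__ signature above
# (vocab_size=30522, hidden_size=768, 12 layers / 12 heads, ...).
config = RoCBertConfig()
print(config.vocab_size)                # 30522
print(config.position_embedding_type)   # "absolute"
print(config.enable_pronunciation)      # True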
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available, is_vision_available, ) __a :str = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :List[Any] = ['BeitFeatureExtractor'] __a :Tuple = ['BeitImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :List[Any] = [ 'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BeitForImageClassification', 'BeitForMaskedImageModeling', 'BeitForSemanticSegmentation', 'BeitModel', 'BeitPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Any = [ 'FlaxBeitForImageClassification', 'FlaxBeitForMaskedImageModeling', 'FlaxBeitModel', 'FlaxBeitPreTrainedModel', ] if TYPE_CHECKING: from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_beit import BeitFeatureExtractor from .image_processing_beit import BeitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_beit import ( BEIT_PRETRAINED_MODEL_ARCHIVE_LIST, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, BeitPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_beit import ( FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel, FlaxBeitPreTrainedModel, ) else: import sys __a :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
312
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class _a ( snake_case_ , snake_case_ ):
    """simple docstring"""

    @register_to_config
    def __init__( self : Dict , UpperCAmelCase : int = 768 , ):
        super().__init__()
        A_ = nn.Parameter(torch.zeros(1 , UpperCAmelCase ) )
        A_ = nn.Parameter(torch.ones(1 , UpperCAmelCase ) )

    def __A ( self : str , UpperCAmelCase : Optional[Union[str, torch.device]] = None , UpperCAmelCase : Optional[torch.dtype] = None , ):
        A_ = nn.Parameter(self.mean.to(UpperCAmelCase ).to(UpperCAmelCase ) )
        A_ = nn.Parameter(self.std.to(UpperCAmelCase ).to(UpperCAmelCase ) )
        return self

    def __A ( self : Dict , UpperCAmelCase : List[Any] ):
        A_ = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def __A ( self : int , UpperCAmelCase : int ):
        A_ = (embeds * self.std) + self.mean
        return embeds
312
1
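The second file in the row above is a learned mean/std embedding normalizer. Below is a self-contained sketch of the same idea with illustrative names in place of the obfuscated ones (`EmbeddingNormalizer`, `scale`, and `unscale` are assumptions for the example, not the library's exact names).

import torch
from torch import nn

class EmbeddingNormalizer(nn.Module):
    # scale() whitens embeddings with learned statistics; unscale() inverts
    # it, so the two compose to the identity.
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def scale(self, embeds):
        return (embeds - self.mean) / self.std

    def unscale(self, embeds):
        return embeds * self.std + self.mean

x = torch.randn(4, 768)
norm = EmbeddingNormalizer()
assert torch.allclose(norm.unscale(norm.scale(x)), x, atol=1e-6)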
from typing import TYPE_CHECKING

from ..utils import _LazyModule


__a :Optional[int] = {
    'config': [
        'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
        'OnnxConfig',
        'OnnxConfigWithPast',
        'OnnxSeq2SeqConfigWithPast',
        'PatchingSpec',
    ],
    'convert': ['export', 'validate_model_outputs'],
    'features': ['FeaturesManager'],
    'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}


if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeqaSeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    __a :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
312
from __future__ import annotations

import numpy as np
from numpy import floataa
from numpy.typing import NDArray


def __snake_case ( __UpperCamelCase : NDArray[floataa] ,__UpperCamelCase : NDArray[floataa] ,__UpperCamelCase : list[int] ,__UpperCamelCase : int ,):
    """simple docstring"""
    A_ , A_ = coefficient_matrix.shape
    A_ , A_ = constant_matrix.shape

    if rowsa != colsa:
        A_ = f'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'''
        raise ValueError(__UpperCamelCase )

    if colsa != 1:
        A_ = f'''Constant matrix must be nx1 but received {rowsa}x{colsa}'''
        raise ValueError(__UpperCamelCase )

    if rowsa != rowsa:
        A_ = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f'''received {rowsa}x{colsa} and {rowsa}x{colsa}'''
        )
        raise ValueError(__UpperCamelCase )

    if len(__UpperCamelCase ) != rowsa:
        A_ = (
            "Number of initial values must be equal to number of rows in coefficient "
            f'''matrix but received {len(__UpperCamelCase )} and {rowsa}'''
        )
        raise ValueError(__UpperCamelCase )

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1" )

    A_ = np.concatenate(
        (coefficient_matrix, constant_matrix) ,axis=1 )
    A_ , A_ = table.shape

    strictly_diagonally_dominant(__UpperCamelCase )

    # Apply the Jacobi update to every row for the given number of iterations
    for _ in range(__UpperCamelCase ):
        A_ = []
        for row in range(__UpperCamelCase ):
            A_ = 0
            for col in range(__UpperCamelCase ):
                if col == row:
                    A_ = table[row][col]
                elif col == cols - 1:
                    A_ = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            A_ = (temp + val) / denom
            new_val.append(__UpperCamelCase )
        A_ = new_val

    return [float(__UpperCamelCase ) for i in new_val]


def __snake_case ( __UpperCamelCase : NDArray[floataa] ):
    """simple docstring"""
    A_ , A_ = table.shape
    A_ = True

    for i in range(0 ,__UpperCamelCase ):
        A_ = 0
        for j in range(0 ,cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant" )

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
312
1
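The Jacobi-iteration file above buries the update rule under obfuscated names. Here is a compact, vectorized sketch of the same method, x_{k+1} = D^{-1}(b - R x_k), where D is the diagonal of A and R = A - D; the function name is illustrative. Convergence requires strict diagonal dominance, which the helper above checks.

import numpy as np

def jacobi(A: np.ndarray, b: np.ndarray, x0: np.ndarray, iterations: int) -> np.ndarray:
    D = np.diag(A)            # diagonal entries of A
    R = A - np.diagflat(D)    # off-diagonal remainder
    x = x0.astype(float)
    for _ in range(iterations):
        x = (b - R @ x) / D   # elementwise divide by the diagonal
    return x

A = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
b = np.array([2.0, -6.0, -4.0])
print(jacobi(A, b, np.zeros(3), 50))  # approaches the exact solution of Ax = b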
import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class _a ( snake_case_ ): """simple docstring""" def __init__( self : int , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any]=1024 , UpperCAmelCase : List[str]=1024 , UpperCAmelCase : List[Any]=3.6 ): A_ = tokenizer A_ = tokenizer.bos_token_id A_ = dataset A_ = seq_length A_ = seq_length * chars_per_token * num_of_sequences def __iter__( self : Dict ): A_ = iter(self.dataset ) A_ = True while more_examples: A_ , A_ = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(UpperCAmelCase )["content"] ) buffer_len += len(buffer[-1] ) except StopIteration: A_ = False break A_ = tokenizer(UpperCAmelCase , truncation=UpperCAmelCase )["input_ids"] A_ = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 , len(UpperCAmelCase ) , self.seq_length ): A_ = all_token_ids[i : i + self.seq_length] if len(UpperCAmelCase ) == self.seq_length: yield torch.tensor(UpperCAmelCase ) def __snake_case ( __UpperCamelCase : Any ): """simple docstring""" A_ = {"streaming": True} A_ = load_dataset(args.dataset_name ,split="train" ,**__UpperCamelCase ) A_ = ConstantLengthDataset(__UpperCamelCase ,__UpperCamelCase ,seq_length=args.seq_length ) A_ = DataLoader(__UpperCamelCase ,batch_size=args.batch_size ) return eval_dataloader def __snake_case ( __UpperCamelCase : Dict ): """simple docstring""" model.eval() A_ = [] for step, batch in enumerate(__UpperCamelCase ): with torch.no_grad(): A_ = model(__UpperCamelCase ,labels=__UpperCamelCase ) A_ = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(__UpperCamelCase ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break A_ = torch.mean(torch.cat(__UpperCamelCase ) ) try: A_ = torch.exp(__UpperCamelCase ) except OverflowError: A_ = float("inf" ) return loss.item(), perplexity.item() # Setup Accelerator __a :int = Accelerator() # Parse configuration __a :str = HfArgumentParser(EvaluationArguments) __a :List[Any] = parser.parse_args() set_seed(args.seed) # Logging __a :Dict = logging.getLogger(__name__) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) # Load model and tokenizer __a :Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt) __a :Dict = AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader __a :Optional[int] = create_dataloader(args) # Prepare everything with our `accelerator`. __a , __a :int = accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info('Evaluating and saving model after training') __a , __a :Optional[int] = evaluate(args) logger.info(F"loss/eval: {eval_loss}, perplexity: {perplexity}")
312
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def __snake_case ( ):
    """simple docstring"""
    A_ = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    A_ = Dataset.from_dict(__UpperCamelCase )
    return dataset


class _a ( snake_case_ ):
    """simple docstring"""

    def __A ( self : Union[str, Any] ):
        A_ = get_dataset()
        A_ = make_duplicate_clusters(UpperCAmelCase , 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )

    def __A ( self : List[Any] ):
        A_ = get_dataset()
        A_ , A_ = deduplicate_dataset(UpperCAmelCase )
        self.assertEqual(len(UpperCAmelCase ) , 2 )
        print(UpperCAmelCase )
        self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , UpperCAmelCase )
312
1
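The ConstantLengthDataset in the row above packs tokenized documents into fixed-length training sequences separated by a special token. A tiny pure-Python illustration of that packing step follows; the helper name is made up for the example.

def pack_sequences(token_lists, sep_id, seq_length):
    # Concatenate token streams with a separator id, then slice
    # fixed-length windows, dropping the ragged tail.
    stream = []
    for ids in token_lists:
        stream.extend(ids + [sep_id])
    return [
        stream[i : i + seq_length]
        for i in range(0, len(stream), seq_length)
        if len(stream[i : i + seq_length]) == seq_length
    ]

chunks = pack_sequences([[1, 2, 3], [4, 5], [6, 7, 8, 9]], sep_id=0, seq_length=4)
print(chunks)  # [[1, 2, 3, 0], [4, 5, 0, 6], [7, 8, 9, 0]]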
import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class _a : """simple docstring""" @staticmethod def __A ( *UpperCAmelCase : Tuple , **UpperCAmelCase : Any ): pass def __snake_case ( __UpperCamelCase : Dict ): """simple docstring""" return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. __a :str = ( 'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png' ) @is_pipeline_test @require_torch @require_vision class _a ( unittest.TestCase ): """simple docstring""" _lowerCamelCase : str = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def __A ( self : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : str ): A_ = pipeline( "document-question-answering" , model=UpperCAmelCase , tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = INVOICE_URL A_ = list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , "" ) ) ) A_ = "What is the placebo?" A_ = [ { "image": load_image(UpperCAmelCase ), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def __A ( self : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple ): A_ = dqa_pipeline(UpperCAmelCase , top_k=2 ) self.assertEqual( UpperCAmelCase , [ [ {"score": ANY(UpperCAmelCase ), "answer": ANY(UpperCAmelCase ), "start": ANY(UpperCAmelCase ), "end": ANY(UpperCAmelCase )}, {"score": ANY(UpperCAmelCase ), "answer": ANY(UpperCAmelCase ), "start": ANY(UpperCAmelCase ), "end": ANY(UpperCAmelCase )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def __A ( self : int ): A_ = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" ) A_ = INVOICE_URL A_ = "How many cats are there?" A_ = [ {"score": 0.0_001, "answer": "oy 2312/2019", "start": 38, "end": 39}, {"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40}, ] A_ = dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase ) A_ = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably A_ = "./tests/fixtures/tests_samples/COCO/000000039769.png" A_ = dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 ) self.assertEqual(UpperCAmelCase , [] ) # We can optionnally pass directly the words and bounding boxes A_ = "./tests/fixtures/tests_samples/COCO/000000039769.png" A_ = [] A_ = [] A_ = dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , words=UpperCAmelCase , boxes=UpperCAmelCase , top_k=2 ) self.assertEqual(UpperCAmelCase , [] ) @slow @require_torch @require_detectrona @require_pytesseract def __A ( self : Optional[int] ): A_ = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , ) A_ = INVOICE_URL A_ = "What is the invoice number?" A_ = dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ] , ) A_ = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ] , ) A_ = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [ [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def __A ( self : str ): A_ = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , ) A_ = INVOICE_URL A_ = "What is the invoice number?" A_ = dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] , ) A_ = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] , ) A_ = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [ [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def __A ( self : List[str] ): A_ = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=UpperCAmelCase ) A_ = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=UpperCAmelCase , revision="3dc6de3" , ) A_ = INVOICE_URL A_ = "What is the invoice number?" 
A_ = dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) A_ = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) A_ = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [ [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] ] * 2 , ) A_ = list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , "" ) ) ) # This model should also work if `image` is set to None A_ = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) @slow @require_torch @require_pytesseract @require_vision def __A ( self : Union[str, Any] ): A_ = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=UpperCAmelCase ) A_ = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=UpperCAmelCase , revision="3dc6de3" , max_seq_len=50 , ) A_ = INVOICE_URL A_ = "What is the invoice number?" A_ = dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] , ) A_ = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [ [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 , ) A_ = list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , "" ) ) ) # This model should also work if `image` is set to None A_ = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] , ) @slow @require_torch def __A ( self : int ): A_ = pipeline( "document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , ) A_ = INVOICE_URL A_ = "What is the invoice number?" A_ = dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 ) self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , [{"answer": "us-001"}] ) @require_tf @unittest.skip("Document question answering not implemented in TF" ) def __A ( self : Tuple ): pass
312
import os from typing import Dict, List, Tuple, TypeVar, Union __a :Any = TypeVar('T') __a :Union[str, Any] = Union[List[T], Tuple[T, ...]] __a :List[str] = Union[T, List[T], Dict[str, T]] __a :Any = Union[str, bytes, os.PathLike]
312
1
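The tests above exercise the document-question-answering pipeline end to end. A hedged usage sketch, assuming `transformers`, Pillow, and pytesseract are installed and that the public checkpoint named in the tests is still available on the Hub:

from transformers import pipeline

dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
answers = dqa(image="invoice.png", question="What is the invoice number?", top_k=2)
for a in answers:
    # Each answer carries a confidence score plus start/end word indices.
    print(f"{a['answer']} (score={a['score']:.4f}, span={a['start']}-{a['end']})")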
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : List[Any] ):
    """simple docstring"""
    A_ = [False] * len(__UpperCamelCase )
    A_ = []
    queue.append(__UpperCamelCase )
    A_ = True

    while queue:
        A_ = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(__UpperCamelCase )
                A_ = True
                A_ = u

    return visited[t]


def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : Tuple ,__UpperCamelCase : Dict ):
    """simple docstring"""
    A_ = [-1] * (len(__UpperCamelCase ))
    A_ = 0

    while bfs(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ):
        A_ = float("Inf" )
        A_ = sink

        while s != source:
            # Find the minimum residual capacity along the selected path
            A_ = min(__UpperCamelCase ,graph[parent[s]][s] )
            A_ = parent[s]

        max_flow += path_flow
        A_ = sink

        while v != source:
            A_ = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            A_ = parent[v]

    return max_flow


__a :Union[str, Any] = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

__a , __a :List[Any] = 0, 5
print(ford_fulkerson(graph, source, sink))
312
__a :Dict = '0.18.2' from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, 
LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, 
FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
312
1
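The max-flow file above is Edmonds-Karp (Ford-Fulkerson with BFS path selection) and prints 23 for the bundled 6-node network. An optional cross-check against networkx, assuming that package is installed (purely for verification, not part of the original row):

import networkx as nx

G = nx.DiGraph()
capacities = [
    (0, 1, 16), (0, 2, 13), (1, 2, 10), (1, 3, 12),
    (2, 1, 4), (2, 4, 14), (3, 2, 9), (3, 5, 20),
    (4, 3, 7), (4, 5, 4),
]
G.add_weighted_edges_from(capacities, weight="capacity")
flow_value, _ = nx.maximum_flow(G, 0, 5)
print(flow_value)  # 23, matching ford_fulkerson(graph, 0, 5) above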
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _a ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : Optional[Any] = StableDiffusionInpaintPipeline _lowerCamelCase : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS _lowerCamelCase : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS _lowerCamelCase : Dict = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _lowerCamelCase : str = frozenset([] ) def __A ( self : Any ): torch.manual_seed(0 ) A_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase , ) A_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase ) torch.manual_seed(0 ) A_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) A_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) A_ = CLIPTextModel(UpperCAmelCase ) A_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) A_ = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __A ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict=0 ): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched A_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase ) A_ = image.cpu().permute(0 , 2 , 3 , 1 )[0] A_ = Image.fromarray(np.uinta(UpperCAmelCase ) ).convert("RGB" ).resize((64, 64) ) A_ = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) ) if str(UpperCAmelCase ).startswith("mps" ): A_ = torch.manual_seed(UpperCAmelCase ) else: A_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase ) A_ = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __A ( self : List[Any] ): A_ = "cpu" # ensure determinism for the device-dependent torch.Generator A_ = self.get_dummy_components() A_ = StableDiffusionInpaintPipeline(**UpperCAmelCase ) A_ = sd_pipe.to(UpperCAmelCase ) 
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase ) A_ = self.get_dummy_inputs(UpperCAmelCase ) A_ = sd_pipe(**UpperCAmelCase ).images A_ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __A ( self : List[str] ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : List[str] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : List[Any] ): A_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) A_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) A_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy" ) A_ = "stabilityai/stable-diffusion-2-inpainting" A_ = StableDiffusionInpaintPipeline.from_pretrained(UpperCAmelCase , safety_checker=UpperCAmelCase ) pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) pipe.enable_attention_slicing() A_ = "Face of a yellow cat, high resolution, sitting on a park bench" A_ = torch.manual_seed(0 ) A_ = pipe( prompt=UpperCAmelCase , image=UpperCAmelCase , mask_image=UpperCAmelCase , generator=UpperCAmelCase , output_type="np" , ) A_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 9E-3 def __A ( self : Any ): A_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) A_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) A_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy" ) A_ = "stabilityai/stable-diffusion-2-inpainting" A_ = StableDiffusionInpaintPipeline.from_pretrained( UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=UpperCAmelCase , ) pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) pipe.enable_attention_slicing() A_ = "Face of a yellow cat, high resolution, sitting on a park bench" A_ = torch.manual_seed(0 ) A_ = pipe( prompt=UpperCAmelCase , image=UpperCAmelCase , mask_image=UpperCAmelCase , generator=UpperCAmelCase , output_type="np" , ) A_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5E-1 def __A ( self : Optional[int] ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() A_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) A_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) A_ = "stabilityai/stable-diffusion-2-inpainting" A_ = PNDMScheduler.from_pretrained(UpperCAmelCase , subfolder="scheduler" ) A_ = StableDiffusionInpaintPipeline.from_pretrained( UpperCAmelCase , safety_checker=UpperCAmelCase , scheduler=UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) 
pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() A_ = "Face of a yellow cat, high resolution, sitting on a park bench" A_ = torch.manual_seed(0 ) A_ = pipe( prompt=UpperCAmelCase , image=UpperCAmelCase , mask_image=UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=2 , output_type="np" , ) A_ = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
312
def __snake_case ( __UpperCamelCase : int = 1000 ):
    """simple docstring"""
    return sum(e for e in range(3 ,__UpperCamelCase ) if e % 3 == 0 or e % 5 == 0 )


if __name__ == "__main__":
    print(F"{solution() = }")
312
1
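The Project Euler #1 solution above is a brute-force scan. The same answer follows in constant time from inclusion-exclusion over arithmetic series; function names here are illustrative.

def solution_closed_form(limit: int = 1000) -> int:
    # Sum of multiples of k below limit: k * m * (m + 1) / 2, m = (limit-1) // k.
    def series_sum(k: int) -> int:
        m = (limit - 1) // k
        return k * m * (m + 1) // 2
    # Count multiples of 3 and of 5, subtracting multiples of 15 counted twice.
    return series_sum(3) + series_sum(5) - series_sum(15)

print(solution_closed_form())  # 233168, matching the loop-based version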
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : int ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __A ( self : Any ): A_ = 1 A_ = 3 A_ = (32, 32) A_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase ) return image @property def __A ( self : Union[str, Any] ): torch.manual_seed(0 ) A_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) return model @property def __A ( self : Dict ): torch.manual_seed(0 ) A_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def __A ( self : str ): torch.manual_seed(0 ) A_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(UpperCAmelCase ) @property def __A ( self : Union[str, Any] ): def extract(*UpperCAmelCase : Tuple , **UpperCAmelCase : Tuple ): class _a : """simple docstring""" def __init__( self : str ): A_ = torch.ones([0] ) def __A ( self : List[Any] , UpperCAmelCase : List[str] ): self.pixel_values.to(UpperCAmelCase ) return self return Out() return extract def __A ( self : Dict ): A_ = "cpu" # ensure determinism for the device-dependent torch.Generator A_ = self.dummy_cond_unet A_ = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase , set_alpha_to_one=UpperCAmelCase , ) A_ = self.dummy_vae A_ = self.dummy_text_encoder A_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # make sure here that pndm scheduler skips prk A_ = StableDiffusionPipeline( unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , ) A_ = sd_pipe.to(UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase ) A_ = "A painting of a squirrel eating a burger" A_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 ) A_ = sd_pipe([prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" ) A_ = output.images A_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 ) A_ = sd_pipe( [prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=UpperCAmelCase , )[0] A_ = image[0, -3:, -3:, -1] A_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] ) 
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __A ( self : List[Any] ): A_ = "cpu" # ensure determinism for the device-dependent torch.Generator A_ = self.dummy_cond_unet A_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase ) A_ = self.dummy_vae A_ = self.dummy_text_encoder A_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # make sure here that pndm scheduler skips prk A_ = StableDiffusionPipeline( unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , ) A_ = sd_pipe.to(UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase ) A_ = "A painting of a squirrel eating a burger" A_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 ) A_ = sd_pipe([prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" ) A_ = output.images A_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 ) A_ = sd_pipe( [prompt] , generator=UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=UpperCAmelCase , )[0] A_ = image[0, -3:, -3:, -1] A_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __A ( self : Any ): A_ = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=UpperCAmelCase ) assert isinstance(UpperCAmelCase , UpperCAmelCase ) assert isinstance(pipe.scheduler , UpperCAmelCase ) assert pipe.safety_checker is None A_ = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(UpperCAmelCase ) A_ = StableDiffusionPipeline.from_pretrained(UpperCAmelCase ) # sanity check that the pipeline still works assert pipe.safety_checker is None A_ = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def __A ( self : Dict ): A_ = self.dummy_cond_unet A_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase ) A_ = self.dummy_vae A_ = self.dummy_text_encoder A_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # put models in fp16 A_ = unet.half() A_ = vae.half() A_ = bert.half() # make sure here that pndm scheduler skips prk A_ = StableDiffusionPipeline( unet=UpperCAmelCase , scheduler=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=self.dummy_extractor , ) A_ = sd_pipe.to(UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase ) A_ = "A painting of a squirrel eating a burger" A_ = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : Optional[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : Union[str, Any] 
): A_ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=UpperCAmelCase ) A_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) A_ = sd_pipe.to(UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase ) A_ = ( "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle" " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with" " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and" " children from bahnhof zoo, detailed " ) A_ = 4003660346 A_ = 7 # without safety guidance (sld_guidance_scale = 0) A_ = torch.manual_seed(UpperCAmelCase ) A_ = sd_pipe( [prompt] , generator=UpperCAmelCase , guidance_scale=UpperCAmelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) A_ = output.images A_ = image[0, -3:, -3:, -1] A_ = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) A_ = torch.manual_seed(UpperCAmelCase ) A_ = sd_pipe( [prompt] , generator=UpperCAmelCase , guidance_scale=UpperCAmelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) A_ = output.images A_ = image[0, -3:, -3:, -1] A_ = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __A ( self : Tuple ): A_ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=UpperCAmelCase ) A_ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) A_ = sd_pipe.to(UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase ) A_ = "padme amidala taking a bath artwork, safe for work, no nudity" A_ = 2734971755 A_ = 7 A_ = torch.manual_seed(UpperCAmelCase ) A_ = sd_pipe( [prompt] , generator=UpperCAmelCase , guidance_scale=UpperCAmelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) A_ = output.images A_ = image[0, -3:, -3:, -1] A_ = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 A_ = torch.manual_seed(UpperCAmelCase ) A_ = sd_pipe( [prompt] , generator=UpperCAmelCase , guidance_scale=UpperCAmelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) A_ = output.images A_ = image[0, -3:, -3:, -1] A_ = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __A ( self : List[str] ): A_ = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" ) A_ = sd_pipe.to(UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase ) A_ = ( "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c." 
" leyendecker" ) A_ = 1044355234 A_ = 12 A_ = torch.manual_seed(UpperCAmelCase ) A_ = sd_pipe( [prompt] , generator=UpperCAmelCase , guidance_scale=UpperCAmelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) A_ = output.images A_ = image[0, -3:, -3:, -1] A_ = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 A_ = torch.manual_seed(UpperCAmelCase ) A_ = sd_pipe( [prompt] , generator=UpperCAmelCase , guidance_scale=UpperCAmelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) A_ = output.images A_ = image[0, -3:, -3:, -1] A_ = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
312
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class _a : """simple docstring""" @property def __A ( self : Union[str, Any] ): return self.get_dummy_input() @property def __A ( self : int ): if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' ) def __A ( self : Union[str, Any] , UpperCAmelCase : List[Any]=True , UpperCAmelCase : str=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[Any]=False , ): A_ = 4 A_ = 32 A_ = (32, 32) A_ = torch.manual_seed(0 ) A_ = torch.device(UpperCAmelCase ) A_ = (batch_size, num_channels) + sizes A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ) A_ = {"hidden_states": hidden_states} if include_temb: A_ = 128 A_ = randn_tensor((batch_size, temb_channels) , generator=UpperCAmelCase , device=UpperCAmelCase ) if include_res_hidden_states_tuple: A_ = torch.manual_seed(1 ) A_ = (randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ),) if include_encoder_hidden_states: A_ = floats_tensor((batch_size, 32, 32) ).to(UpperCAmelCase ) if include_skip_sample: A_ = randn_tensor(((batch_size, 3) + sizes) , generator=UpperCAmelCase , device=UpperCAmelCase ) return dummy_input def __A ( self : Optional[int] ): A_ = { "in_channels": 32, "out_channels": 32, "temb_channels": 128, } if self.block_type == "up": A_ = 32 if self.block_type == "mid": init_dict.pop("out_channels" ) A_ = self.dummy_input return init_dict, inputs_dict def __A ( self : List[str] , UpperCAmelCase : Optional[Any] ): A_ , A_ = self.prepare_init_args_and_inputs_for_common() A_ = self.block_class(**UpperCAmelCase ) unet_block.to(UpperCAmelCase ) unet_block.eval() with torch.no_grad(): A_ = unet_block(**UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = output[0] self.assertEqual(output.shape , self.output_shape ) A_ = output[0, -1, -3:, -3:] A_ = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase ) assert torch_all_close(output_slice.flatten() , UpperCAmelCase , atol=5E-3 ) @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" ) def __A ( self : Union[str, Any] ): A_ , A_ = self.prepare_init_args_and_inputs_for_common() A_ = self.block_class(**UpperCAmelCase ) model.to(UpperCAmelCase ) model.train() A_ = model(**UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = output[0] A_ = torch.device(UpperCAmelCase ) A_ = randn_tensor(output.shape , device=UpperCAmelCase ) A_ = torch.nn.functional.mse_loss(UpperCAmelCase , UpperCAmelCase ) loss.backward()
312
1
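All of the diffusers tests in the rows above depend on seeded generators so that sampled latents are reproducible before image slices are compared against hard-coded values. A minimal sketch of that pattern, assuming only `torch`:

import torch

# Two generators with the same seed produce bit-identical samples, which is
# what lets the tests assert on fixed numeric slices of generated images.
gen_a = torch.Generator().manual_seed(0)
gen_b = torch.Generator().manual_seed(0)
latents_a = torch.randn(1, 4, 8, 8, generator=gen_a)
latents_b = torch.randn(1, 4, 8, 8, generator=gen_b)
assert torch.equal(latents_a, latents_b)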
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer UpperCAmelCase__ = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast UpperCAmelCase__ = TaTokenizerFast UpperCAmelCase__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ "MT5EncoderModel", "MT5ForConditionalGeneration", "MT5ForQuestionAnswering", "MT5Model", "MT5PreTrainedModel", "MT5Stack", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys UpperCAmelCase__ = _LazyModule( __name__, globals()["__file__"], _import_structure, extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast}, module_spec=__spec__, )
0
import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch __a :int = True except ImportError: __a :Optional[Any] = False try: from torch.hub import _get_torch_home __a :Optional[Any] = _get_torch_home() except ImportError: __a :Tuple = os.path.expanduser( os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')) ) __a :Optional[Any] = os.path.join(torch_cache_home, 'transformers') __a :int = 'https://cdn.huggingface.co' __a :Any = 'https://s3.amazonaws.com/models.huggingface.co/bert' __a :Optional[Any] = '/'.join(str(Path(__file__).resolve()).split('/')[:-1]) __a :str = os.path.join(PATH, 'config.yaml') __a :str = os.path.join(PATH, 'attributes.txt') __a :Optional[Any] = os.path.join(PATH, 'objects.txt') __a :Optional[int] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path) __a :Dict = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE) __a :List[Any] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE) __a :List[str] = 'pytorch_model.bin' __a :Tuple = 'config.yaml' def __snake_case ( __UpperCamelCase : Optional[Any]=OBJECTS ,__UpperCamelCase : List[str]=ATTRIBUTES ): """simple docstring""" A_ = [] with open(__UpperCamelCase ) as f: for object in f.readlines(): vg_classes.append(object.split("," )[0].lower().strip() ) A_ = [] with open(__UpperCamelCase ) as f: for object in f.readlines(): vg_attrs.append(object.split("," )[0].lower().strip() ) return vg_classes, vg_attrs def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" A_ = OrderedDict() with open(__UpperCamelCase ,"rb" ) as f: A_ = pkl.load(__UpperCamelCase )["model"] for k in copy.deepcopy(list(ckp.keys() ) ): A_ = ckp.pop(__UpperCamelCase ) if isinstance(__UpperCamelCase ,np.ndarray ): A_ = torch.tensor(__UpperCamelCase ) else: assert isinstance(__UpperCamelCase ,torch.tensor ), type(__UpperCamelCase ) A_ = v return r class _a : """simple docstring""" _lowerCamelCase : Union[str, Any] = {} def __init__( self : str , UpperCAmelCase : dict , UpperCAmelCase : str = "root" , UpperCAmelCase : List[str]=0 ): A_ = name A_ = level A_ = {} for k, v in dictionary.items(): if v is None: raise ValueError() A_ = copy.deepcopy(UpperCAmelCase ) A_ = copy.deepcopy(UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = Config(UpperCAmelCase , name=UpperCAmelCase , level=level + 1 ) A_ = v setattr(self , UpperCAmelCase , UpperCAmelCase ) A_ = d def __repr__( self : Optional[Any] ): return str(list((self._pointer.keys()) ) ) def __setattr__( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Any ): A_ = val A_ = val A_ = key.split("." 
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.upper()] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if i == last_level:  # was `l == last_level`, comparing a name against an index
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level - 1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_dict = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
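# A minimal sketch (not in the original file) of the Config container: nested
# dicts become nested Config objects reachable as attributes. `from_pretrained`
# works the same way but resolves the YAML through the hub helpers below.
def _example_config_usage():
    cfg = Config({"input": {"format": "RGB", "max_size": 800}, "roi_heads": {"nms_thresh": 0.5}})
    print(cfg.input.format)  # "RGB" -- nested dicts are wrapped in Config
    print(cfg)               # pretty-printed, indented by nesting level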
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    # intentional debug tripwire: halt execution even when the tensors match
    raise Exception("tensors are all good")


# Hugging Face hub functions below


def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"


def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        # resume an interrupted download by requesting only the missing tail
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
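# A quick self-checking sketch (illustrative model ids, not part of the original
# file) of the two URL layouts hf_bucket_url produces.
def _example_bucket_urls():
    assert hf_bucket_url("bert-base-uncased", "config.yaml") == (
        "https://cdn.huggingface.co/bert-base-uncased-config.yaml"  # legacy flat layout: no "/" in the id
    )
    assert hf_bucket_url("unc-nlp/frcnn-vg-finetuned", "config.yaml", use_cdn=False) == (
        "https://s3.amazonaws.com/models.huggingface.co/bert/unc-nlp/frcnn-vg-finetuned/config.yaml"
    )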
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}")
            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path


def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)  # was `shaaaa`, which does not exist in hashlib
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename
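# Sketch (not in the original file): cache filenames are content-addressed --
# sha256 of the URL, plus sha256 of the etag as an extension when the server
# supplied one.
def _example_cache_filename():
    name = url_to_filename("https://cdn.huggingface.co/bert-base-uncased-config.yaml", etag='"abc123"')
    print(name)  # "<64 hex chars>.<64 hex chars>"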
,"-" ) + "-extracted" A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ) if os.path.isdir(__UpperCamelCase ) and os.listdir(__UpperCamelCase ) and not force_extract: return output_path_extracted # Prevent parallel extractions A_ = output_path + ".lock" with FileLock(__UpperCamelCase ): shutil.rmtree(__UpperCamelCase ,ignore_errors=__UpperCamelCase ) os.makedirs(__UpperCamelCase ) if is_zipfile(__UpperCamelCase ): with ZipFile(__UpperCamelCase ,"r" ) as zip_file: zip_file.extractall(__UpperCamelCase ) zip_file.close() elif tarfile.is_tarfile(__UpperCamelCase ): A_ = tarfile.open(__UpperCamelCase ) tar_file.extractall(__UpperCamelCase ) tar_file.close() else: raise EnvironmentError("Archive format of {} could not be identified".format(__UpperCamelCase ) ) return output_path_extracted return output_path def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Any="," ): """simple docstring""" assert isinstance(__UpperCamelCase ,__UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): with open(__UpperCamelCase ) as f: A_ = eval(f.read() ) else: A_ = requests.get(__UpperCamelCase ) try: A_ = requests.json() except Exception: A_ = req.content.decode() assert data is not None, "could not connect" try: A_ = eval(__UpperCamelCase ) except Exception: A_ = data.split("\n" ) req.close() return data def __snake_case ( __UpperCamelCase : int ): """simple docstring""" A_ = requests.get(__UpperCamelCase ) A_ = np.array(Image.open(BytesIO(response.content ) ) ) return img def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" A_ = url.split("/" )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(__UpperCamelCase ) with open(__UpperCamelCase ,"rb" ) as stream: A_ = pkl.load(__UpperCamelCase ) A_ = weights.pop("model" ) A_ = {} for k, v in model.items(): A_ = torch.from_numpy(__UpperCamelCase ) if "running_var" in k: A_ = torch.tensor([0] ) A_ = k.replace("running_var" ,"num_batches_tracked" ) A_ = zero return new def __snake_case ( ): """simple docstring""" print(f'''{os.path.abspath(os.path.join(__UpperCamelCase ,os.pardir ) )}/demo.ipynb''' ) def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[int]="RGB" ): """simple docstring""" assert isinstance(__UpperCamelCase ,__UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): A_ = cva.imread(__UpperCamelCase ) else: A_ = get_image_from_url(__UpperCamelCase ) assert img is not None, f'''could not connect to: {im}''' A_ = cva.cvtColor(__UpperCamelCase ,cva.COLOR_BGR2RGB ) if input_format == "RGB": A_ = img[:, :, ::-1] return img def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[str]=1 ): """simple docstring""" return (images[i : i + batch] for i in range(0 ,len(__UpperCamelCase ) ,__UpperCamelCase ))
312
0