Dataset schema (column, type, observed range):

    code                     string   length 86 to 54.5k
    code_codestyle           int64    0 to 371
    style_context            string   length 87 to 49.2k
    style_context_codestyle  int64    0 to 349
    label                    int64    0 to 1
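A minimal sketch of loading and inspecting rows with this schema via the 🤗 `datasets` library. The dataset identifier below is a hypothetical placeholder (the real name is not given here), and the comments reflect an assumption, based only on the column names and ranges above, that the integer columns are style-category ids and a binary label:

from datasets import load_dataset

# "user/code-style-pairs" is a placeholder id, not this dataset's real name.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code"][:120])               # a Python source file (86 to 54.5k chars)
print(row["code_codestyle"])           # presumably a style-category id in [0, 371]
print(row["style_context"][:120])      # a second source file serving as style context
print(row["style_context_codestyle"])  # presumably a style-category id in [0, 349]
print(row["label"])                    # binary target, 0 or 1

The records below follow the schema's column order: code, code_codestyle, style_context, style_context_codestyle, label.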
"""simple docstring""" import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) @dataclass class a : snake_case__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} ) snake_case__ = field( metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} ) snake_case__ = field( default=1_2_8 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) snake_case__ = field( default=a__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.task_name.lower() class a ( a__ ): snake_case__ = '''train''' snake_case__ = '''dev''' snake_case__ = '''test''' class a ( a__ ): snake_case__ = 42 snake_case__ = 42 snake_case__ = 42 def __init__( self , _snake_case , _snake_case , _snake_case = None , _snake_case = Split.train , _snake_case = None , ): """simple docstring""" warnings.warn( 'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets ' 'library. You can have a look at this example script for pointers: ' 'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , _snake_case , ) lowerCAmelCase = args lowerCAmelCase = glue_processors[args.task_name]() lowerCAmelCase = glue_output_modes[args.task_name] if isinstance(_snake_case , _snake_case ): try: lowerCAmelCase = Split[mode] except KeyError: raise KeyError('mode is not a valid split name' ) # Load data features from cache or dataset file lowerCAmelCase = os.path.join( cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , ) lowerCAmelCase = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) lowerCAmelCase ,lowerCAmelCase = label_list[2], label_list[1] lowerCAmelCase = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
lowerCAmelCase = cached_features_file + '.lock' with FileLock(_snake_case ): if os.path.exists(_snake_case ) and not args.overwrite_cache: lowerCAmelCase = time.time() lowerCAmelCase = torch.load(_snake_case ) logger.info( F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start ) else: logger.info(F'Creating features from dataset file at {args.data_dir}' ) if mode == Split.dev: lowerCAmelCase = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: lowerCAmelCase = self.processor.get_test_examples(args.data_dir ) else: lowerCAmelCase = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: lowerCAmelCase = examples[:limit_length] lowerCAmelCase = glue_convert_examples_to_features( _snake_case , _snake_case , max_length=args.max_seq_length , label_list=_snake_case , output_mode=self.output_mode , ) lowerCAmelCase = time.time() torch.save(self.features , _snake_case ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' ) def __len__( self ): """simple docstring""" return len(self.features ) def __getitem__( self , _snake_case ): """simple docstring""" return self.features[i] def UpperCamelCase__ ( self ): """simple docstring""" return self.label_list
code_codestyle: 309
"""simple docstring""" import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) __UpperCamelCase : Dict = '''▁''' __UpperCamelCase : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''} __UpperCamelCase : str = { '''sentencepiece_model_file''': '''sentencepiece.bpe.model''', '''vocab_file''': '''vocab.txt''', } __UpperCamelCase : Tuple = { '''vocab_file''': { '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''', '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''', }, '''sentencepiece_model_file''': { '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''', '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''', }, } __UpperCamelCase : Optional[Any] = { '''ernie-m-base''': 514, '''ernie-m-large''': 514, } __UpperCamelCase : str = { '''ernie-m-base''': {'''do_lower_case''': False}, '''ernie-m-large''': {'''do_lower_case''': False}, } class a ( a__ ): snake_case__ = ["input_ids"] snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_INIT_CONFIGURATION snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = PRETRAINED_VOCAB_FILES_MAP snake_case__ = RESOURCE_FILES_NAMES def __init__( self , _snake_case , _snake_case=None , _snake_case=False , _snake_case="utf8" , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case = None , **_snake_case , ): """simple docstring""" lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , vocab_file=_snake_case , encoding=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , ) lowerCAmelCase = do_lower_case lowerCAmelCase = sentencepiece_model_ckpt lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_snake_case ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: lowerCAmelCase = self.load_vocab(filepath=_snake_case ) else: lowerCAmelCase = {self.sp_model.id_to_piece(_snake_case ): id for id in range(self.sp_model.get_piece_size() )} lowerCAmelCase = {v: k for k, v in self.vocab.items()} def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if text is None: return None lowerCAmelCase = self.tokenize(_snake_case ) lowerCAmelCase ,lowerCAmelCase = '', [] for i, ch in enumerate(_snake_case ): if ch in self.SP_CHAR_MAPPING: lowerCAmelCase = self.SP_CHAR_MAPPING.get(_snake_case ) else: lowerCAmelCase = unicodedata.normalize('NFKC' , _snake_case ) if self.is_whitespace(_snake_case ): continue normalized_text += ch char_mapping.extend([i] * len(_snake_case ) ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = normalized_text, [], 0 if self.do_lower_case: lowerCAmelCase = text.lower() for token in split_tokens: if token[:1] == "▁": lowerCAmelCase = token[1:] lowerCAmelCase = text[offset:].index(_snake_case ) + offset lowerCAmelCase = start + len(_snake_case ) token_mapping.append((char_mapping[start], char_mapping[end - 
1] + 1) ) lowerCAmelCase = end return token_mapping @property def UpperCamelCase__ ( self ): """simple docstring""" return len(self.vocab ) def UpperCamelCase__ ( self ): """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self ): """simple docstring""" lowerCAmelCase = self.__dict__.copy() lowerCAmelCase = None return state def __setstate__( self , _snake_case ): """simple docstring""" lowerCAmelCase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): lowerCAmelCase = {} lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return "".join((self.SP_CHAR_MAPPING.get(_snake_case , _snake_case ) for c in text) ) def UpperCamelCase__ ( self , _snake_case , _snake_case=False , _snake_case=64 , _snake_case=0.1 ): """simple docstring""" if self.sp_model_kwargs.get('enable_sampling' ) is True: lowerCAmelCase = True if self.sp_model_kwargs.get('alpha' ) is not None: lowerCAmelCase = self.sp_model_kwargs.get('alpha' ) if self.sp_model_kwargs.get('nbest_size' ) is not None: lowerCAmelCase = self.sp_model_kwargs.get('nbest_size' ) if not enable_sampling: lowerCAmelCase = self.sp_model.EncodeAsPieces(_snake_case ) else: lowerCAmelCase = self.sp_model.SampleEncodeAsPieces(_snake_case , _snake_case , _snake_case ) lowerCAmelCase = [] for pi, piece in enumerate(_snake_case ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(_snake_case ) and pi != 0: new_pieces.append(_snake_case ) continue else: continue lowerCAmelCase = 0 for i, chunk in enumerate(_snake_case ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(_snake_case ) or self.is_punct(_snake_case ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(_snake_case ) lowerCAmelCase = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowerCAmelCase = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowerCAmelCase = i if len(_snake_case ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip() return out_string def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = self.convert_ids_to_tokens(_snake_case ) lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip() return out_string def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return self.reverse_vocab.get(_snake_case , self.unk_token ) def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] lowerCAmelCase = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + 
[(0, 0)] def UpperCamelCase__ ( self , _snake_case , _snake_case=None , _snake_case=False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1] return [1] + ([0] * len(_snake_case )) + [1] def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" if token_ids_a is None: # [CLS] X [SEP] return (len(_snake_case ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(_snake_case ) + 1) + [1] * (len(_snake_case ) + 3) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if "\u4e00" <= char <= "\u9fff": return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if char in ",;:.?!~,;:。?!《》【】": return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(_snake_case ) == 1: lowerCAmelCase = unicodedata.category(_snake_case ) if cat == "Zs": return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = {} with io.open(_snake_case , 'r' , encoding='utf-8' ) as f: for index, line in enumerate(_snake_case ): lowerCAmelCase = line.rstrip('\n' ) lowerCAmelCase = int(_snake_case ) return token_to_idx def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = 0 if os.path.isdir(_snake_case ): lowerCAmelCase = os.path.join( _snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) else: lowerCAmelCase = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(_snake_case , 'w' , encoding='utf-8' ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda _snake_case : kv[1] ): if index != token_index: logger.warning( F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.' ' Please check that the vocabulary is not corrupted!' ) lowerCAmelCase = token_index writer.write(token + '\n' ) index += 1 lowerCAmelCase = os.path.join(_snake_case , 'sentencepiece.bpe.model' ) with open(_snake_case , 'wb' ) as fi: lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(_snake_case ) return (vocab_file,)
style_context_codestyle: 309
label: 1
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class a : def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=2 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , _snake_case=10_00 , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_input_mask lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope lowerCAmelCase = range_bbox def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowerCAmelCase = bbox[i, j, 3] lowerCAmelCase = bbox[i, j, 1] lowerCAmelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: lowerCAmelCase = bbox[i, j, 2] lowerCAmelCase = bbox[i, j, 0] lowerCAmelCase = t lowerCAmelCase = tf.convert_to_tensor(_snake_case ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFLayoutLMModel(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , token_type_ids=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFLayoutLMForMaskedLM(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = TFLayoutLMForSequenceClassification(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = TFLayoutLMForTokenClassification(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFLayoutLMForQuestionAnswering(config=_snake_case ) lowerCAmelCase = model(_snake_case , _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf class a ( a__ , a__ , unittest.TestCase ): snake_case__ = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) snake_case__ = ( 
{ '''feature-extraction''': TFLayoutLMModel, '''fill-mask''': TFLayoutLMForMaskedLM, '''text-classification''': TFLayoutLMForSequenceClassification, '''token-classification''': TFLayoutLMForTokenClassification, '''zero-shot''': TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) snake_case__ = False snake_case__ = True snake_case__ = 1_0 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = TFLayoutLMModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) @unittest.skip('Onnx compliancy broke with TF 2.10' ) def UpperCamelCase__ ( self ): """simple docstring""" pass def _SCREAMING_SNAKE_CASE (): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off lowerCAmelCase = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231 lowerCAmelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 lowerCAmelCase = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 lowerCAmelCase = 
tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) lowerCAmelCase = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class a ( unittest.TestCase ): @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model(input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) # test the sequence output on [0, :3, :3] lowerCAmelCase = tf.convert_to_tensor( [[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _snake_case , atol=1E-3 ) ) # test the pooled output on [1, :3] lowerCAmelCase = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _snake_case , atol=1E-3 ) ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model( input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar lowerCAmelCase = outputs.loss lowerCAmelCase = (2,) self.assertEqual(loss.shape , _snake_case ) # test the shape of the logits lowerCAmelCase = outputs.logits lowerCAmelCase = (2, 2) self.assertEqual(logits.shape , _snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model( input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case ) # test the shape of the logits lowerCAmelCase = outputs.logits lowerCAmelCase = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , _snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = prepare_layoutlm_batch_inputs() # forward pass lowerCAmelCase = model(input_ids=_snake_case , bbox=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) # test the shape of the logits lowerCAmelCase = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , _snake_case ) self.assertEqual(outputs.end_logits.shape , _snake_case )
code_codestyle: 309
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME __UpperCamelCase : int = ['''small''', '''medium''', '''large'''] __UpperCamelCase : str = '''lm_head.decoder.weight''' __UpperCamelCase : Dict = '''lm_head.weight''' def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ): lowerCAmelCase = torch.load(_UpperCAmelCase ) lowerCAmelCase = d.pop(_UpperCAmelCase ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) torch.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ) if __name__ == "__main__": __UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--dialogpt_path''', default='''.''', type=str) __UpperCamelCase : Optional[int] = parser.parse_args() for MODEL in DIALOGPT_MODELS: __UpperCamelCase : Dict = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''') __UpperCamelCase : str = f'''./DialoGPT-{MODEL}''' convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
style_context_codestyle: 309
label: 1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor __UpperCamelCase : List[str] = logging.get_logger(__name__) class a ( a__ ): def __init__( self , *_snake_case , **_snake_case ): """simple docstring""" warnings.warn( 'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use BeitImageProcessor instead.' , _snake_case , ) super().__init__(*_snake_case , **_snake_case )
code_codestyle: 309
"""simple docstring""" __UpperCamelCase : Dict = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} __UpperCamelCase : str = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ): lowerCAmelCase = True lowerCAmelCase = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) order.append(_UpperCAmelCase ) return order def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ): lowerCAmelCase = True lowerCAmelCase = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return component def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] ): lowerCAmelCase = len(_UpperCAmelCase ) * [False] lowerCAmelCase = {vert: [] for vert in range(len(_UpperCAmelCase ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(_UpperCAmelCase ) lowerCAmelCase = [] for i, was_visited in enumerate(_UpperCAmelCase ): if not was_visited: order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = [] lowerCAmelCase = len(_UpperCAmelCase ) * [False] for i in range(len(_UpperCAmelCase ) ): lowerCAmelCase = order[len(_UpperCAmelCase ) - i - 1] if not visited[vert]: lowerCAmelCase = find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) components_list.append(_UpperCAmelCase ) return components_list
style_context_codestyle: 309
label: 1
"""simple docstring""" import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class a ( a__ , a__ , unittest.TestCase ): snake_case__ = StableDiffusionDiffEditPipeline snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''} snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''} snake_case__ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess snake_case__ = frozenset([] ) def UpperCamelCase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_snake_case , ) lowerCAmelCase = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , ) lowerCAmelCase = DDIMInverseScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_snake_case , set_alpha_to_zero=_snake_case , ) torch.manual_seed(0 ) lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , ) lowerCAmelCase = CLIPTextModel(_snake_case ) lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) lowerCAmelCase = { 'unet': unet, 'scheduler': scheduler, 'inverse_scheduler': inverse_scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ): """simple docstring""" lowerCAmelCase = floats_tensor((1, 16, 16) , rng=random.Random(_snake_case ) ).to(_snake_case ) lowerCAmelCase = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_snake_case ) ).to(_snake_case ) if str(_snake_case ).startswith('mps' ): lowerCAmelCase = torch.manual_seed(_snake_case ) else: lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) lowerCAmelCase = { 'prompt': 'a dog and a newt', 'mask_image': mask, 'image_latents': latents, 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 
'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ): """simple docstring""" lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case ) lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase = Image.fromarray(np.uinta(_snake_case ) ).convert('RGB' ) if str(_snake_case ).startswith('mps' ): lowerCAmelCase = torch.manual_seed(_snake_case ) else: lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) lowerCAmelCase = { 'image': image, 'source_prompt': 'a cat and a frog', 'target_prompt': 'a dog and a newt', 'generator': generator, 'num_inference_steps': 2, 'num_maps_per_mask': 2, 'mask_encode_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ): """simple docstring""" lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case ) lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase = Image.fromarray(np.uinta(_snake_case ) ).convert('RGB' ) if str(_snake_case ).startswith('mps' ): lowerCAmelCase = torch.manual_seed(_snake_case ) else: lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) lowerCAmelCase = { 'image': image, 'prompt': 'a cat and a frog', 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'decode_latents': True, 'output_type': 'numpy', } return inputs def UpperCamelCase__ ( self ): """simple docstring""" if not hasattr(self.pipeline_class , '_optional_components' ): return lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = self.pipeline_class(**_snake_case ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(_snake_case , _snake_case , _snake_case ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) lowerCAmelCase = self.get_dummy_inputs(_snake_case ) lowerCAmelCase = pipe(**_snake_case )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_snake_case ) lowerCAmelCase = self.pipeline_class.from_pretrained(_snake_case ) pipe_loaded.to(_snake_case ) pipe_loaded.set_progress_bar_config(disable=_snake_case ) for optional_component in pipe._optional_components: self.assertTrue( getattr(_snake_case , _snake_case ) is None , F'`{optional_component}` did not stay set to None after loading.' 
, ) lowerCAmelCase = self.get_dummy_inputs(_snake_case ) lowerCAmelCase = pipe_loaded(**_snake_case )[0] lowerCAmelCase = np.abs(output - output_loaded ).max() self.assertLess(_snake_case , 1E-4 ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = 'cpu' lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = self.pipeline_class(**_snake_case ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = self.get_dummy_mask_inputs(_snake_case ) lowerCAmelCase = pipe.generate_mask(**_snake_case ) lowerCAmelCase = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) lowerCAmelCase = np.array([0] * 9 ) lowerCAmelCase = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(_snake_case , 1E-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = 'cpu' lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = self.pipeline_class(**_snake_case ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = self.get_dummy_inversion_inputs(_snake_case ) lowerCAmelCase = pipe.invert(**_snake_case ).images lowerCAmelCase = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) lowerCAmelCase = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_snake_case , 1E-3 ) def UpperCamelCase__ ( self ): """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=5E-3 ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = 'cpu' lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = {'beta_start': 0.00_085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'} lowerCAmelCase = DPMSolverMultistepScheduler(**_snake_case ) lowerCAmelCase = DPMSolverMultistepInverseScheduler(**_snake_case ) lowerCAmelCase = self.pipeline_class(**_snake_case ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = self.get_dummy_inversion_inputs(_snake_case ) lowerCAmelCase = pipe.invert(**_snake_case ).images lowerCAmelCase = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) lowerCAmelCase = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_snake_case , 1E-3 ) @require_torch_gpu @slow class a ( unittest.TestCase ): def UpperCamelCase__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def UpperCamelCase__ ( cls ): """simple docstring""" lowerCAmelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' ) lowerCAmelCase = raw_image.convert('RGB' ).resize((7_68, 7_68) ) lowerCAmelCase = raw_image def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1' , safety_checker=_snake_case , torch_dtype=torch.floataa ) lowerCAmelCase = DDIMScheduler.from_config(pipe.scheduler.config ) lowerCAmelCase = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = 'a bowl of fruit' lowerCAmelCase = 'a bowl of pears' lowerCAmelCase = 
pipe.generate_mask( image=self.raw_image , source_prompt=_snake_case , target_prompt=_snake_case , generator=_snake_case , ) lowerCAmelCase = pipe.invert( prompt=_snake_case , image=self.raw_image , inpaint_strength=0.7 , generator=_snake_case ).latents lowerCAmelCase = pipe( prompt=_snake_case , mask_image=_snake_case , image_latents=_snake_case , generator=_snake_case , negative_prompt=_snake_case , inpaint_strength=0.7 , output_type='numpy' , ).images[0] lowerCAmelCase = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((7_68, 7_68) ) ) / 2_55 ) assert np.abs((expected_image - image).max() ) < 5E-1 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1' , safety_checker=_snake_case , torch_dtype=torch.floataa ) lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) lowerCAmelCase = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = 'a bowl of fruit' lowerCAmelCase = 'a bowl of pears' lowerCAmelCase = pipe.generate_mask( image=self.raw_image , source_prompt=_snake_case , target_prompt=_snake_case , generator=_snake_case , ) lowerCAmelCase = pipe.invert( prompt=_snake_case , image=self.raw_image , inpaint_strength=0.7 , generator=_snake_case , num_inference_steps=25 , ).latents lowerCAmelCase = pipe( prompt=_snake_case , mask_image=_snake_case , image_latents=_snake_case , generator=_snake_case , negative_prompt=_snake_case , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0] lowerCAmelCase = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((7_68, 7_68) ) ) / 2_55 ) assert np.abs((expected_image - image).max() ) < 5E-1
code_codestyle: 309
"""simple docstring""" import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) @dataclass class a : snake_case__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} ) snake_case__ = field( metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} ) snake_case__ = field( default=1_2_8 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) snake_case__ = field( default=a__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.task_name.lower() class a ( a__ ): snake_case__ = '''train''' snake_case__ = '''dev''' snake_case__ = '''test''' class a ( a__ ): snake_case__ = 42 snake_case__ = 42 snake_case__ = 42 def __init__( self , _snake_case , _snake_case , _snake_case = None , _snake_case = Split.train , _snake_case = None , ): """simple docstring""" warnings.warn( 'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets ' 'library. You can have a look at this example script for pointers: ' 'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , _snake_case , ) lowerCAmelCase = args lowerCAmelCase = glue_processors[args.task_name]() lowerCAmelCase = glue_output_modes[args.task_name] if isinstance(_snake_case , _snake_case ): try: lowerCAmelCase = Split[mode] except KeyError: raise KeyError('mode is not a valid split name' ) # Load data features from cache or dataset file lowerCAmelCase = os.path.join( cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , ) lowerCAmelCase = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) lowerCAmelCase ,lowerCAmelCase = label_list[2], label_list[1] lowerCAmelCase = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
lowerCAmelCase = cached_features_file + '.lock' with FileLock(_snake_case ): if os.path.exists(_snake_case ) and not args.overwrite_cache: lowerCAmelCase = time.time() lowerCAmelCase = torch.load(_snake_case ) logger.info( F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start ) else: logger.info(F'Creating features from dataset file at {args.data_dir}' ) if mode == Split.dev: lowerCAmelCase = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: lowerCAmelCase = self.processor.get_test_examples(args.data_dir ) else: lowerCAmelCase = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: lowerCAmelCase = examples[:limit_length] lowerCAmelCase = glue_convert_examples_to_features( _snake_case , _snake_case , max_length=args.max_seq_length , label_list=_snake_case , output_mode=self.output_mode , ) lowerCAmelCase = time.time() torch.save(self.features , _snake_case ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' ) def __len__( self ): """simple docstring""" return len(self.features ) def __getitem__( self , _snake_case ): """simple docstring""" return self.features[i] def UpperCamelCase__ ( self ): """simple docstring""" return self.label_list
style_context_codestyle: 309
label: 1
"""simple docstring""" from __future__ import annotations from fractions import Fraction def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : int ): return ( num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ): lowerCAmelCase = [] lowerCAmelCase = 11 lowerCAmelCase = int('1' + '0' * digit_len ) for num in range(_UpperCAmelCase , _UpperCAmelCase ): while den <= 99: if (num != den) and (num % 10 == den // 10) and (den % 10 != 0): if is_digit_cancelling(_UpperCAmelCase , _UpperCAmelCase ): solutions.append(F'{num}/{den}' ) den += 1 num += 1 lowerCAmelCase = 10 return solutions def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 2 ): lowerCAmelCase = 1.0 for fraction in fraction_list(_UpperCAmelCase ): lowerCAmelCase = Fraction(_UpperCAmelCase ) result *= frac.denominator / frac.numerator return int(_UpperCAmelCase ) if __name__ == "__main__": print(solution())
code_codestyle: 309
"""simple docstring""" import os from collections.abc import Iterator def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ): for dir_path, dir_names, filenames in os.walk(_UpperCAmelCase ): lowerCAmelCase = [d for d in dir_names if d != 'scripts' and d[0] not in '._'] for filename in filenames: if filename == "__init__.py": continue if os.path.splitext(_UpperCAmelCase )[1] in (".py", ".ipynb"): yield os.path.join(_UpperCAmelCase , _UpperCAmelCase ).lstrip('./' ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ): return F'{i * " "}*' if i else "\n##" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ): lowerCAmelCase = old_path.split(os.sep ) for i, new_part in enumerate(new_path.split(os.sep ) ): if (i + 1 > len(_UpperCAmelCase ) or old_parts[i] != new_part) and new_part: print(F'{md_prefix(_UpperCAmelCase )} {new_part.replace("_" , " " ).title()}' ) return new_path def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ): lowerCAmelCase = '' for filepath in sorted(good_file_paths(_UpperCAmelCase ) ): lowerCAmelCase ,lowerCAmelCase = os.path.split(_UpperCAmelCase ) if filepath != old_path: lowerCAmelCase = print_path(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = (filepath.count(os.sep ) + 1) if filepath else 0 lowerCAmelCase = F'{filepath}/{filename}'.replace(' ' , '%20' ) lowerCAmelCase = os.path.splitext(filename.replace('_' , ' ' ).title() )[0] print(F'{md_prefix(_UpperCAmelCase )} [{filename}]({url})' ) if __name__ == "__main__": print_directory_md('''.''')
style_context_codestyle: 309
label: 1
"""simple docstring""" import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '-m' , '--pretrained_model_name_or_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , ) parser.add_argument( '-c' , '--caption' , type=_UpperCAmelCase , default='robotic cat with wings' , help='Text used to generate images.' , ) parser.add_argument( '-n' , '--images_num' , type=_UpperCAmelCase , default=4 , help='How much images to generate.' , ) parser.add_argument( '-s' , '--seed' , type=_UpperCAmelCase , default=42 , help='Seed for random process.' , ) parser.add_argument( '-ci' , '--cuda_id' , type=_UpperCAmelCase , default=0 , help='cuda_id.' , ) lowerCAmelCase = parser.parse_args() return args def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : str ): if not len(_UpperCAmelCase ) == rows * cols: raise ValueError('The specified number of rows and columns are not correct.' ) lowerCAmelCase ,lowerCAmelCase = imgs[0].size lowerCAmelCase = Image.new('RGB' , size=(cols * w, rows * h) ) lowerCAmelCase ,lowerCAmelCase = grid.size for i, img in enumerate(_UpperCAmelCase ): grid.paste(_UpperCAmelCase , box=(i % cols * w, i // cols * h) ) return grid def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str]="robotic cat with wings" , _UpperCAmelCase : Tuple=7.5 , _UpperCAmelCase : Any=50 , _UpperCAmelCase : Dict=1 , _UpperCAmelCase : Union[str, Any]=42 , ): lowerCAmelCase = torch.Generator(pipeline.device ).manual_seed(_UpperCAmelCase ) lowerCAmelCase = pipeline( _UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , generator=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , ).images lowerCAmelCase = int(math.sqrt(_UpperCAmelCase ) ) lowerCAmelCase = image_grid(_UpperCAmelCase , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images __UpperCamelCase : Optional[Any] = parse_args() # Load models and create wrapper for stable diffusion __UpperCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''') __UpperCamelCase : Union[str, Any] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''') __UpperCamelCase : Optional[Any] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''') __UpperCamelCase : str = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''') __UpperCamelCase : Tuple = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) __UpperCamelCase : int = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')): __UpperCamelCase : List[Any] = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, '''unet''', unet) else: __UpperCamelCase : List[str] = unet.to(torch.device('''cuda''', args.cuda_id)) __UpperCamelCase : Any = pipeline.to(unet.device) __UpperCamelCase ,__UpperCamelCase : str = 
generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split())))) __UpperCamelCase : Tuple = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
code_codestyle: 309
"""simple docstring""" import os from datetime import datetime as dt from github import Github __UpperCamelCase : int = [ '''good first issue''', '''good second issue''', '''good difficult issue''', '''enhancement''', '''new pipeline/model''', '''new scheduler''', '''wip''', ] def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = Github(os.environ['GITHUB_TOKEN'] ) lowerCAmelCase = g.get_repo('huggingface/diffusers' ) lowerCAmelCase = repo.get_issues(state='open' ) for issue in open_issues: lowerCAmelCase = sorted(issue.get_comments() , key=lambda _UpperCAmelCase : i.created_at , reverse=_UpperCAmelCase ) lowerCAmelCase = comments[0] if len(_UpperCAmelCase ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state='closed' ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state='open' ) issue.remove_from_labels('stale' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( 'This issue has been automatically marked as stale because it has not had ' 'recent activity. If you think this still needs to be addressed ' 'please comment on this thread.\n\nPlease note that issues that do not follow the ' '[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) ' 'are likely to be ignored.' ) issue.add_to_labels('stale' ) if __name__ == "__main__": main()
style_context_codestyle: 309
label: 1
"""simple docstring""" from timeit import timeit __UpperCamelCase : Optional[Any] = { '''MALAYALAM''': True, '''String''': False, '''rotor''': True, '''level''': True, '''A''': True, '''BB''': True, '''ABC''': False, '''amanaplanacanalpanama''': True, # "a man a plan a canal panama" } # Ensure our test data is valid assert all((key == key[::-1]) is value for key, value in test_data.items()) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): lowerCAmelCase = 0 lowerCAmelCase = len(_UpperCAmelCase ) - 1 while start_i < end_i: if s[start_i] == s[end_i]: start_i += 1 end_i -= 1 else: return False return True def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): lowerCAmelCase = len(_UpperCAmelCase ) // 2 lowerCAmelCase = len(_UpperCAmelCase ) # We need to traverse till half of the length of string # as we can get access of the i'th last element from # i'th index. # eg: [0,1,2,3,4,5] => 4th index can be accessed # with the help of 1st index (i==n-i-1) # where n is length of string return all(s[i] == s[n - i - 1] for i in range(_UpperCAmelCase ) ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): if len(_UpperCAmelCase ) <= 2: return True if s[0] == s[len(_UpperCAmelCase ) - 1]: return is_palindrome_recursive(s[1:-1] ) else: return False def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): return s == s[::-1] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): lowerCAmelCase = F'all({name}(key) is value for key, value in test_data.items())' lowerCAmelCase = F'from __main__ import test_data, {name}' lowerCAmelCase = 50_0000 lowerCAmelCase = timeit(stmt=_UpperCAmelCase , setup=_UpperCAmelCase , number=_UpperCAmelCase ) print(F'{name:<35} finished {number:,} runs in {result:.5f} seconds' ) if __name__ == "__main__": for key, value in test_data.items(): assert is_palindrome(key) is is_palindrome_recursive(key) assert is_palindrome(key) is is_palindrome_slice(key) print(f'''{key:21} {value}''') print('''a man a plan a canal panama''') # finished 500,000 runs in 0.46793 seconds benchmark_function('''is_palindrome_slice''') # finished 500,000 runs in 0.85234 seconds benchmark_function('''is_palindrome''') # finished 500,000 runs in 1.32028 seconds benchmark_function('''is_palindrome_recursive''') # finished 500,000 runs in 2.08679 seconds benchmark_function('''is_palindrome_traversal''')
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __UpperCamelCase : Any = { '''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''], '''processing_layoutlmv2''': ['''LayoutLMv2Processor'''], '''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = ['''LayoutLMv2TokenizerFast'''] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[int] = ['''LayoutLMv2FeatureExtractor'''] __UpperCamelCase : Optional[int] = ['''LayoutLMv2ImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Any = [ '''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LayoutLMv2ForQuestionAnswering''', '''LayoutLMv2ForSequenceClassification''', '''LayoutLMv2ForTokenClassification''', '''LayoutLMv2Layer''', '''LayoutLMv2Model''', '''LayoutLMv2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class a ( a__ ): snake_case__ = (KDPMaDiscreteScheduler,) snake_case__ = 1_0 def UpperCamelCase__ ( self , **_snake_case ): """simple docstring""" lowerCAmelCase = { 'num_train_timesteps': 11_00, 'beta_start': 0.0_001, 'beta_end': 0.02, 'beta_schedule': 'linear', } config.update(**_snake_case ) return config def UpperCamelCase__ ( self ): """simple docstring""" for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=_snake_case , beta_end=_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config(prediction_type='v_prediction' ) lowerCAmelCase = scheduler_class(**_snake_case ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase = sample.to(_snake_case ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase = scheduler.scale_model_input(_snake_case , _snake_case ) lowerCAmelCase = model(_snake_case , _snake_case ) lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case ) lowerCAmelCase = output.prev_sample lowerCAmelCase = torch.sum(torch.abs(_snake_case ) ) lowerCAmelCase = torch.mean(torch.abs(_snake_case ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.69_34E-07 ) < 1E-2 assert abs(result_mean.item() - 6.11_12E-10 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72E-07 ) < 1E-2 assert abs(result_mean.item() - 0.0_002 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" if torch_device == "mps": return lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**_snake_case ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase = sample.to(_snake_case ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase = scheduler.scale_model_input(_snake_case , _snake_case ) lowerCAmelCase = model(_snake_case , _snake_case ) lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case ) lowerCAmelCase = output.prev_sample lowerCAmelCase = torch.sum(torch.abs(_snake_case ) ) lowerCAmelCase = torch.mean(torch.abs(_snake_case ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.4_125 ) < 1E-2 assert abs(result_mean.item() - 0.0_266 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.4_125 ) < 1E-2 assert abs(result_mean.item() - 0.0_266 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" if torch_device == "mps": return lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**_snake_case ) 
scheduler.set_timesteps(self.num_inference_steps , device=_snake_case ) lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter.to(_snake_case ) * scheduler.init_noise_sigma for t in scheduler.timesteps: lowerCAmelCase = scheduler.scale_model_input(_snake_case , _snake_case ) lowerCAmelCase = model(_snake_case , _snake_case ) lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case ) lowerCAmelCase = output.prev_sample lowerCAmelCase = torch.sum(torch.abs(_snake_case ) ) lowerCAmelCase = torch.mean(torch.abs(_snake_case ) ) if str(_snake_case ).startswith('cpu' ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.4_125 ) < 1E-2 assert abs(result_mean.item() - 0.0_266 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.4_125 ) < 1E-2 assert abs(result_mean.item() - 0.0_266 ) < 1E-3
"""simple docstring""" import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) class a ( a__ ): def __init__( self , *_snake_case , **_snake_case ): """simple docstring""" warnings.warn( 'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use PoolFormerImageProcessor instead.' , _snake_case , ) super().__init__(*_snake_case , **_snake_case )
"""simple docstring""" from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class a ( unittest.TestCase ): def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = tf.convert_to_tensor( [ [ 8.2_220_991, # 3rd highest value; idx. 0 -0.5_620_044, 5.23_229_752, 4.0_386_393, -6.8_798_378, -0.54_785_802, -3.2_012_153, 2.92_777_176, 1.88_171_953, 7.35_341_276, # 5th highest value; idx. 9 8.43_207_833, # 2nd highest value; idx. 10 -9.85_711_836, -5.96_209_236, -1.13_039_161, -7.1_115_294, -0.8_369_633, -5.3_186_408, 7.06_427_407, 0.81_369_344, -0.82_023_817, -5.9_179_796, 0.58_813_443, -6.99_778_438, 4.71_551_189, -0.18_771_637, 7.44_020_759, # 4th highest value; idx. 25 9.38_450_987, # 1st highest value; idx. 26 2.12_662_941, -9.32_562_038, 2.35_652_522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58_425_518, 4.53_139_238, -5.57_510_464, -6.28_030_699, -7.19_529_503, -4.02_122_551, 1.39_337_037, -6.06_707_057, 1.59_480_517, -9.643_119, 0.03_907_799, 0.67_231_762, -8.88_206_726, 6.27_115_922, # 4th highest value; idx. 13 2.28_520_723, 4.82_767_506, 4.30_421_368, 8.8_275_313, # 2nd highest value; idx. 17 5.44_029_958, # 5th highest value; idx. 18 -4.4_735_794, 7.38_579_536, # 3rd highest value; idx. 20 -2.91_051_663, 2.61_946_077, -2.5_674_762, -9.48_959_302, -4.02_922_645, -1.35_416_918, 9.67_702_323, # 1st highest value; idx. 
27 -5.89_478_553, 1.85_370_467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) lowerCAmelCase = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above lowerCAmelCase = tf.convert_to_tensor( [8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.floataa , ) # expected non filtered values as noted above lowerCAmelCase = tf_top_k_top_p_filtering(_snake_case , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) lowerCAmelCase = output[output != -float('inf' )] lowerCAmelCase = tf.cast( tf.where(tf.not_equal(_snake_case , tf.constant(-float('inf' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(_snake_case , _snake_case , rtol=1E-12 ) tf.debugging.assert_equal(_snake_case , _snake_case ) @require_tf class a ( unittest.TestCase , a__ ): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_tf_available(): snake_case__ = { '''AutoModelForCausalLM''': TFAutoModelForCausalLM, '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq, '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM, '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq, '''LogitsProcessorList''': TFLogitsProcessorList, '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor, '''create_tensor_fn''': tf.convert_to_tensor, '''floats_tensor''': floats_tensor, '''return_tensors''': '''tf''', } @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) lowerCAmelCase = 2 lowerCAmelCase = 2 class a ( tf.Module ): def __init__( self , _snake_case ): """simple docstring""" super(_snake_case , self ).__init__() lowerCAmelCase = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name='input_ids' ), tf.TensorSpec((None, input_length) , tf.intaa , name='attention_mask' ), ) , jit_compile=_snake_case , ) def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = self.model.generate( input_ids=_snake_case , attention_mask=_snake_case , max_new_tokens=_snake_case , return_dict_in_generate=_snake_case , ) return {"sequences": outputs["sequences"]} lowerCAmelCase = [[2, 0], [1_02, 1_03]] lowerCAmelCase = [[1, 0], [1, 1]] lowerCAmelCase = DummyModel(model=_snake_case ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(_snake_case , _snake_case , signatures={'serving_default': dummy_model.serving} ) lowerCAmelCase = tf.saved_model.load(_snake_case ).signatures['serving_default'] for batch_size in range(1 , len(_snake_case ) + 1 ): lowerCAmelCase = { 'input_ids': tf.constant(dummy_input_ids[:batch_size] ), 'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ), } lowerCAmelCase = serving_func(**_snake_case )['sequences'] lowerCAmelCase = test_model.generate(**_snake_case , max_new_tokens=_snake_case ) tf.debugging.assert_equal(_snake_case , _snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) lowerCAmelCase = 1 lowerCAmelCase = 2 class a ( tf.Module ): def __init__( self , _snake_case ): """simple docstring""" super(_snake_case , self ).__init__() lowerCAmelCase = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , 
name='input_ids' ), tf.TensorSpec((batch_size, None) , tf.intaa , name='attention_mask' ), ) , jit_compile=_snake_case , ) def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = self.model.generate( input_ids=_snake_case , attention_mask=_snake_case , max_new_tokens=_snake_case , return_dict_in_generate=_snake_case , ) return {"sequences": outputs["sequences"]} lowerCAmelCase = [[2], [1_02, 1_03]] lowerCAmelCase = [[1], [1, 1]] lowerCAmelCase = DummyModel(model=_snake_case ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(_snake_case , _snake_case , signatures={'serving_default': dummy_model.serving} ) lowerCAmelCase = tf.saved_model.load(_snake_case ).signatures['serving_default'] for input_row in range(len(_snake_case ) ): lowerCAmelCase = { 'input_ids': tf.constant([dummy_input_ids[input_row]] ), 'attention_mask': tf.constant([dummy_attention_masks[input_row]] ), } lowerCAmelCase = serving_func(**_snake_case )['sequences'] lowerCAmelCase = test_model.generate(**_snake_case , max_new_tokens=_snake_case ) tf.debugging.assert_equal(_snake_case , _snake_case ) @slow @require_tensorflow_text def UpperCamelCase__ ( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=_snake_case ) class a ( tf.keras.layers.Layer ): def __init__( self ): """simple docstring""" super().__init__() lowerCAmelCase = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(_snake_case , 'spiece.model' ) , 'rb' ).read() ) lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' ) def UpperCamelCase__ ( self , _snake_case , *_snake_case , **_snake_case ): """simple docstring""" lowerCAmelCase = self.tokenizer.tokenize(_snake_case ) lowerCAmelCase ,lowerCAmelCase = text.pad_model_inputs( _snake_case , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) lowerCAmelCase = self.model.generate(input_ids=_snake_case , attention_mask=_snake_case ) return self.tokenizer.detokenize(_snake_case ) lowerCAmelCase = CompleteSentenceTransformer() lowerCAmelCase = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs' ) lowerCAmelCase = complete_model(_snake_case ) lowerCAmelCase = tf.keras.Model(_snake_case , _snake_case ) keras_model.save(_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = { 'do_sample': True, 'num_beams': 1, 'top_p': 0.7, 'top_k': 10, 'temperature': 0.7, } lowerCAmelCase = 14 lowerCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) lowerCAmelCase = 'Hello, my dog is cute and' lowerCAmelCase = tokenizer(_snake_case , return_tensors='tf' ) lowerCAmelCase = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) lowerCAmelCase = 6_38 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(':/CPU:0' ): tf.random.set_seed(0 ) lowerCAmelCase = model.generate(**_snake_case , eos_token_id=_snake_case , **_snake_case ) self.assertTrue(expectation == len(generated_tokens[0] ) ) lowerCAmelCase = [6_38, 1_98] with tf.device(':/CPU:0' ): tf.random.set_seed(0 ) lowerCAmelCase = model.generate(**_snake_case , eos_token_id=_snake_case , **_snake_case ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = 
AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' ) lowerCAmelCase = 'Hugging Face is a technology company based in New York and Paris.' lowerCAmelCase = bart_tokenizer(_snake_case , return_tensors='tf' ).input_ids lowerCAmelCase = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' ) lowerCAmelCase = bart_model.generate(_snake_case ).numpy() class a ( a__ ): def UpperCamelCase__ ( self , _snake_case , _snake_case=None , **_snake_case ): """simple docstring""" return super().call(_snake_case , **_snake_case ) lowerCAmelCase = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart' ) lowerCAmelCase = bart_model.generate(_snake_case , foo='bar' ).numpy() self.assertTrue(np.array_equal(_snake_case , _snake_case ) ) class a ( bart_model.model.encoder.__class__ ): def UpperCamelCase__ ( self , _snake_case , **_snake_case ): """simple docstring""" return super().call(_snake_case , **_snake_case ) lowerCAmelCase = FakeEncoder(bart_model.config , bart_model.model.shared ) lowerCAmelCase = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) lowerCAmelCase = bart_model.generate(_snake_case ).numpy() with self.assertRaises(_snake_case ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(_snake_case , foo='bar' )
"""simple docstring""" from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. __UpperCamelCase : str = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. __UpperCamelCase : Optional[Any] = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. __UpperCamelCase : Dict = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ): lowerCAmelCase = len([g for position, g in enumerate(_UpperCAmelCase ) if g == main_target[position]] ) return (item, float(_UpperCAmelCase )) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ): lowerCAmelCase = random.randint(0 , len(_UpperCAmelCase ) - 1 ) lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:] lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] ): lowerCAmelCase = list(_UpperCAmelCase ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: lowerCAmelCase = random.choice(_UpperCAmelCase ) return "".join(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : tuple[str, float] , _UpperCAmelCase : list[tuple[str, float]] , _UpperCAmelCase : list[str] , ): lowerCAmelCase = [] # Generate more children proportionally to the fitness score. lowerCAmelCase = int(parent_a[1] * 100 ) + 1 lowerCAmelCase = 10 if child_n >= 10 else child_n for _ in range(_UpperCAmelCase ): lowerCAmelCase = population_score[random.randint(0 , _UpperCAmelCase )][0] lowerCAmelCase ,lowerCAmelCase = crossover(parent_a[0] , _UpperCAmelCase ) # Append new string to the population list. pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) ) pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) ) return pop def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] , _UpperCAmelCase : bool = True ): # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: lowerCAmelCase = F'{N_POPULATION} must be bigger than {N_SELECTED}' raise ValueError(_UpperCAmelCase ) # Verify that the target contains no genes besides the ones inside genes variable. lowerCAmelCase = sorted({c for c in target if c not in genes} ) if not_in_genes_list: lowerCAmelCase = F'{not_in_genes_list} is not in genes list, evolution cannot converge' raise ValueError(_UpperCAmelCase ) # Generate random starting population. lowerCAmelCase = [] for _ in range(_UpperCAmelCase ): population.append(''.join([random.choice(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) )] ) ) # Just some logs to know what the algorithms is doing. lowerCAmelCase ,lowerCAmelCase = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(_UpperCAmelCase ) # Random population created. Now it's time to evaluate. 
# Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. lowerCAmelCase = [evaluate(_UpperCAmelCase , _UpperCAmelCase ) for item in population] # Check if there is a matching evolution. lowerCAmelCase = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] , reverse=_UpperCAmelCase ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F'\nGeneration: {generation}' F'\nTotal Population:{total_population}' F'\nBest score: {population_score[0][1]}' F'\nBest string: {population_score[0][0]}' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. lowerCAmelCase = population[: int(N_POPULATION / 3 )] population.clear() population.extend(_UpperCAmelCase ) # Normalize population score to be between 0 and 1. lowerCAmelCase = [ (item, score / len(_UpperCAmelCase )) for item, score in population_score ] # This is selection for i in range(_UpperCAmelCase ): population.extend(select(population_score[int(_UpperCAmelCase )] , _UpperCAmelCase , _UpperCAmelCase ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(_UpperCAmelCase ) > N_POPULATION: break if __name__ == "__main__": __UpperCamelCase : Tuple = ( '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!''' ) __UpperCamelCase : str = list( ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm''' '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\''' ) __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase : Dict = basic(target_str, genes_list) print( f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
"""simple docstring""" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : int ): return int((input_a, input_a).count(1 ) != 0 ) def _SCREAMING_SNAKE_CASE (): assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
"""simple docstring""" import copy import os import cva import numpy as np from matplotlib import pyplot as plt class a : def __init__( self ): """simple docstring""" lowerCAmelCase = '' lowerCAmelCase = '' lowerCAmelCase = [] lowerCAmelCase = 0 lowerCAmelCase = 2_56 lowerCAmelCase = 0 lowerCAmelCase = 0 lowerCAmelCase = 0 lowerCAmelCase = 0 def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = cva.imread(_snake_case , 0 ) lowerCAmelCase = copy.deepcopy(self.img ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='x' ) lowerCAmelCase = np.sum(_snake_case ) for i in range(len(_snake_case ) ): lowerCAmelCase = x[i] / self.k self.sk += prk lowerCAmelCase = (self.L - 1) * self.sk if self.rem != 0: lowerCAmelCase = int(last % last ) lowerCAmelCase = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(_snake_case ) lowerCAmelCase = int(np.ma.count(self.img ) / self.img[1].size ) lowerCAmelCase = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): lowerCAmelCase = self.img[j][i] if num != self.last_list[num]: lowerCAmelCase = self.last_list[num] cva.imwrite('output_data/output.jpg' , self.img ) def UpperCamelCase__ ( self ): """simple docstring""" plt.hist(self.img.ravel() , 2_56 , [0, 2_56] ) def UpperCamelCase__ ( self ): """simple docstring""" cva.imshow('Output-Image' , self.img ) cva.imshow('Input-Image' , self.original_image ) cva.waitKey(50_00 ) cva.destroyAllWindows() if __name__ == "__main__": __UpperCamelCase : int = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''') __UpperCamelCase : List[Any] = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) __UpperCamelCase : Union[str, Any] = '''▁''' __UpperCamelCase : List[Any] = {'''vocab_file''': '''spiece.model'''} __UpperCamelCase : List[str] = { '''vocab_file''': { '''google/reformer-crime-and-punishment''': ( '''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model''' ) } } __UpperCamelCase : Tuple = { '''google/reformer-crime-and-punishment''': 52_4288, } class a ( a__ ): snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_VOCAB_FILES_MAP snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = ['''input_ids''', '''attention_mask'''] def __init__( self , _snake_case , _snake_case="</s>" , _snake_case="<unk>" , _snake_case=[] , _snake_case = None , **_snake_case , ): """simple docstring""" lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=_snake_case , unk_token=_snake_case , additional_special_tokens=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , ) lowerCAmelCase = vocab_file lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_snake_case ) @property def UpperCamelCase__ ( self ): """simple docstring""" return self.sp_model.get_piece_size() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" lowerCAmelCase = self.__dict__.copy() lowerCAmelCase = None return state def __setstate__( self , _snake_case ): """simple docstring""" lowerCAmelCase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): lowerCAmelCase = {} lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return self.sp_model.encode(_snake_case , out_type=_snake_case ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return self.sp_model.piece_to_id(_snake_case ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if index < self.sp_model.get_piece_size(): lowerCAmelCase = self.sp_model.IdToPiece(_snake_case ) return token def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = [] lowerCAmelCase = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_snake_case ) + token lowerCAmelCase = [] else: current_sub_tokens.append(_snake_case ) out_string += self.sp_model.decode(_snake_case ) return out_string.strip() def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" if not os.path.isdir(_snake_case ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return lowerCAmelCase = os.path.join( _snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _snake_case ) elif not os.path.isfile(self.vocab_file ): with open(_snake_case , 'wb' ) as fi: 
lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(_snake_case ) return (out_vocab_file,)
"""simple docstring""" import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( 'split_dict' , [ SplitDict(), SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ), SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ), SplitDict({'train': SplitInfo()} ), ] , ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : SplitDict ): lowerCAmelCase = split_dict._to_yaml_list() assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ) lowerCAmelCase = SplitDict._from_yaml_list(_UpperCAmelCase ) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump lowerCAmelCase = None # the split name of split_dict takes over the name of the split info object lowerCAmelCase = split_name assert split_dict == reloaded @pytest.mark.parametrize( 'split_info' , [SplitInfo(), SplitInfo(dataset_name=_UpperCAmelCase ), SplitInfo(dataset_name='my_dataset' )] ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] ): # For backward compatibility, we need asdict(split_dict) to return split info dictrionaries with the "dataset_name" # field even if it's deprecated. This way old versionso of `datasets` can still reload dataset_infos.json files lowerCAmelCase = asdict(SplitDict({'train': split_info} ) ) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
"""simple docstring""" from typing import Callable, List, Optional, Union import PIL import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, is_accelerate_available, logging __UpperCamelCase : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name class a ( a__ ): def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" super().__init__() if hasattr(scheduler.config , 'steps_offset' ) and scheduler.config.steps_offset != 1: lowerCAmelCase = ( F'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`' F' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure ' 'to update the config accordingly as leaving `steps_offset` might led to incorrect results' ' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,' ' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`' ' file' ) deprecate('steps_offset!=1' , '1.0.0' , _snake_case , standard_warn=_snake_case ) lowerCAmelCase = dict(scheduler.config ) lowerCAmelCase = 1 lowerCAmelCase = FrozenDict(_snake_case ) if hasattr(scheduler.config , 'skip_prk_steps' ) and scheduler.config.skip_prk_steps is False: lowerCAmelCase = ( F'The configuration file of this scheduler: {scheduler} has not set the configuration' ' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make' ' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to' ' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face' ' Hub, it would be very nice if you could open a Pull request for the' ' `scheduler/scheduler_config.json` file' ) deprecate('skip_prk_steps not set' , '1.0.0' , _snake_case , standard_warn=_snake_case ) lowerCAmelCase = dict(scheduler.config ) lowerCAmelCase = True lowerCAmelCase = FrozenDict(_snake_case ) if safety_checker is None: logger.warning( F'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure' ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered' ' results in services or applications open to the public. Both the diffusers team and Hugging Face' ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling' ' it only for use-cases that involve analyzing network behavior or auditing its results. For more' ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' 
) self.register_modules( segmentation_model=_snake_case , segmentation_processor=_snake_case , vae=_snake_case , text_encoder=_snake_case , tokenizer=_snake_case , unet=_snake_case , scheduler=_snake_case , safety_checker=_snake_case , feature_extractor=_snake_case , ) def UpperCamelCase__ ( self , _snake_case = "auto" ): """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowerCAmelCase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" self.enable_attention_slicing(_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) lowerCAmelCase = torch.device('cuda' ) for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]: if cpu_offloaded_model is not None: cpu_offload(_snake_case , _snake_case ) @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def UpperCamelCase__ ( self ): """simple docstring""" if self.device != torch.device('meta' ) or not hasattr(self.unet , '_hf_hook' ): return self.device for module in self.unet.modules(): if ( hasattr(_snake_case , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() def __call__( self , _snake_case , _snake_case , _snake_case , _snake_case = 5_12 , _snake_case = 5_12 , _snake_case = 50 , _snake_case = 7.5 , _snake_case = None , _snake_case = 1 , _snake_case = 0.0 , _snake_case = None , _snake_case = None , _snake_case = "pil" , _snake_case = True , _snake_case = None , _snake_case = 1 , **_snake_case , ): """simple docstring""" lowerCAmelCase = self.segmentation_processor( text=[text] , images=[image] , padding='max_length' , return_tensors='pt' ).to(self.device ) lowerCAmelCase = self.segmentation_model(**_snake_case ) lowerCAmelCase = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy() lowerCAmelCase = self.numpy_to_pil(_snake_case )[0].resize(image.size ) # Run inpainting pipeline with the generated mask lowerCAmelCase = StableDiffusionInpaintPipeline( vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , ) return inpainting_pipeline( prompt=_snake_case , image=_snake_case , mask_image=_snake_case , height=_snake_case , width=_snake_case , num_inference_steps=_snake_case , guidance_scale=_snake_case , negative_prompt=_snake_case , num_images_per_prompt=_snake_case , eta=_snake_case , generator=_snake_case , latents=_snake_case , output_type=_snake_case , return_dict=_snake_case , callback=_snake_case , callback_steps=_snake_case , )
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __UpperCamelCase : Any = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] ): from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ): from diffusers.utils.testing_utils import pytest_terminal_summary_main lowerCAmelCase = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(_UpperCAmelCase , id=_UpperCAmelCase )
"""simple docstring""" import os import sys __UpperCamelCase : Any = os.path.join(os.path.dirname(__file__), '''src''') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) __UpperCamelCase : Any = [ '''torch''', '''numpy''', '''tokenizers''', '''filelock''', '''requests''', '''tqdm''', '''regex''', '''sentencepiece''', '''sacremoses''', '''importlib_metadata''', '''huggingface_hub''', ] @add_start_docstrings(AutoConfig.__doc__ ) def _SCREAMING_SNAKE_CASE (*_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : str ): return AutoConfig.from_pretrained(*_UpperCAmelCase , **_UpperCAmelCase ) @add_start_docstrings(AutoTokenizer.__doc__ ) def _SCREAMING_SNAKE_CASE (*_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : List[str] ): return AutoTokenizer.from_pretrained(*_UpperCAmelCase , **_UpperCAmelCase ) @add_start_docstrings(AutoModel.__doc__ ) def _SCREAMING_SNAKE_CASE (*_UpperCAmelCase : str , **_UpperCAmelCase : Optional[Any] ): return AutoModel.from_pretrained(*_UpperCAmelCase , **_UpperCAmelCase ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def _SCREAMING_SNAKE_CASE (*_UpperCAmelCase : Dict , **_UpperCAmelCase : Optional[int] ): return AutoModelForCausalLM.from_pretrained(*_UpperCAmelCase , **_UpperCAmelCase ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def _SCREAMING_SNAKE_CASE (*_UpperCAmelCase : int , **_UpperCAmelCase : Tuple ): return AutoModelForMaskedLM.from_pretrained(*_UpperCAmelCase , **_UpperCAmelCase ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def _SCREAMING_SNAKE_CASE (*_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : int ): return AutoModelForSequenceClassification.from_pretrained(*_UpperCAmelCase , **_UpperCAmelCase ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def _SCREAMING_SNAKE_CASE (*_UpperCAmelCase : Dict , **_UpperCAmelCase : str ): return AutoModelForQuestionAnswering.from_pretrained(*_UpperCAmelCase , **_UpperCAmelCase )
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class a ( unittest.TestCase ): def __init__( self , _snake_case , _snake_case=7 , _snake_case=3 , _snake_case=18 , _snake_case=30 , _snake_case=4_00 , _snake_case=True , _snake_case=None , _snake_case=True , _snake_case=None , _snake_case=True , ): """simple docstring""" lowerCAmelCase = size if size is not None else {'shortest_edge': 20} lowerCAmelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18} lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = num_channels lowerCAmelCase = image_size lowerCAmelCase = min_resolution lowerCAmelCase = max_resolution lowerCAmelCase = do_resize lowerCAmelCase = size lowerCAmelCase = do_center_crop lowerCAmelCase = crop_size lowerCAmelCase = do_flip_channel_order def UpperCamelCase__ ( self ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class a ( a__ , unittest.TestCase ): snake_case__ = MobileViTImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = MobileViTImageProcessingTester(self ) @property def UpperCamelCase__ ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_snake_case , 'do_resize' ) ) self.assertTrue(hasattr(_snake_case , 'size' ) ) self.assertTrue(hasattr(_snake_case , 'do_center_crop' ) ) self.assertTrue(hasattr(_snake_case , 'center_crop' ) ) self.assertTrue(hasattr(_snake_case , 'do_flip_channel_order' ) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 20} ) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} ) lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , Image.Image ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values self.assertEqual( 
encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , np.ndarray ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , torch.Tensor ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
"""simple docstring""" # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class a ( a__ , a__ , a__ , unittest.TestCase ): snake_case__ = StableDiffusionControlNetImgaImgPipeline snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS snake_case__ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} ) snake_case__ = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCamelCase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) torch.manual_seed(0 ) lowerCAmelCase = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) lowerCAmelCase = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , ) torch.manual_seed(0 ) lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) lowerCAmelCase = CLIPTextModel(_snake_case ) lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) lowerCAmelCase = { 'unet': unet, 'controlnet': controlnet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ): """simple docstring""" if str(_snake_case ).startswith('mps' ): lowerCAmelCase = torch.manual_seed(_snake_case ) else: lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) lowerCAmelCase = 2 lowerCAmelCase = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case ) , ) 
lowerCAmelCase = floats_tensor(control_image.shape , rng=random.Random(_snake_case ) ).to(_snake_case ) lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase = Image.fromarray(np.uinta(_snake_case ) ).convert('RGB' ).resize((64, 64) ) lowerCAmelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', 'image': image, 'control_image': control_image, } return inputs def UpperCamelCase__ ( self ): """simple docstring""" return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) class a ( a__ , a__ , unittest.TestCase ): snake_case__ = StableDiffusionControlNetImgaImgPipeline snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS snake_case__ = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def UpperCamelCase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(_snake_case ): if isinstance(_snake_case , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) lowerCAmelCase = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(_snake_case ) torch.manual_seed(0 ) lowerCAmelCase = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(_snake_case ) torch.manual_seed(0 ) lowerCAmelCase = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , ) torch.manual_seed(0 ) lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) lowerCAmelCase = CLIPTextModel(_snake_case ) lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) lowerCAmelCase = MultiControlNetModel([controlneta, controlneta] ) lowerCAmelCase = { 'unet': unet, 'controlnet': controlnet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': 
None, } return components def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ): """simple docstring""" if str(_snake_case ).startswith('mps' ): lowerCAmelCase = torch.manual_seed(_snake_case ) else: lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) lowerCAmelCase = 2 lowerCAmelCase = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case ) , ), ] lowerCAmelCase = floats_tensor(control_image[0].shape , rng=random.Random(_snake_case ) ).to(_snake_case ) lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase = Image.fromarray(np.uinta(_snake_case ) ).convert('RGB' ).resize((64, 64) ) lowerCAmelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', 'image': image, 'control_image': control_image, } return inputs def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = self.pipeline_class(**_snake_case ) pipe.to(_snake_case ) lowerCAmelCase = 10.0 lowerCAmelCase = 4 lowerCAmelCase = self.get_dummy_inputs(_snake_case ) lowerCAmelCase = steps lowerCAmelCase = scale lowerCAmelCase = pipe(**_snake_case )[0] lowerCAmelCase = self.get_dummy_inputs(_snake_case ) lowerCAmelCase = steps lowerCAmelCase = scale lowerCAmelCase = pipe(**_snake_case , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] lowerCAmelCase = self.get_dummy_inputs(_snake_case ) lowerCAmelCase = steps lowerCAmelCase = scale lowerCAmelCase = pipe(**_snake_case , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] lowerCAmelCase = self.get_dummy_inputs(_snake_case ) lowerCAmelCase = steps lowerCAmelCase = scale lowerCAmelCase = pipe(**_snake_case , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = self.pipeline_class(**_snake_case ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(_snake_case ) except NotImplementedError: pass @slow @require_torch_gpu class a ( unittest.TestCase ): def UpperCamelCase__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' ) 
lowerCAmelCase = StableDiffusionControlNetImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , safety_checker=_snake_case , controlnet=_snake_case ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 ) lowerCAmelCase = 'evil space-punk bird' lowerCAmelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((5_12, 5_12) ) lowerCAmelCase = load_image( 'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((5_12, 5_12) ) lowerCAmelCase = pipe( _snake_case , _snake_case , control_image=_snake_case , generator=_snake_case , output_type='np' , num_inference_steps=50 , strength=0.6 , ) lowerCAmelCase = output.images[0] assert image.shape == (5_12, 5_12, 3) lowerCAmelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' ) assert np.abs(expected_image - image ).max() < 9E-2
"""simple docstring""" import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class a ( unittest.TestCase ): def UpperCamelCase__ ( self ): """simple docstring""" debug_launcher(test_script.main ) def UpperCamelCase__ ( self ): """simple docstring""" debug_launcher(test_ops.main )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase : Union[str, Any] = { '''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Dict = [ '''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MegatronBertForCausalLM''', '''MegatronBertForMaskedLM''', '''MegatronBertForMultipleChoice''', '''MegatronBertForNextSentencePrediction''', '''MegatronBertForPreTraining''', '''MegatronBertForQuestionAnswering''', '''MegatronBertForSequenceClassification''', '''MegatronBertForTokenClassification''', '''MegatronBertModel''', '''MegatronBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_megatron_bert import ( MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, MegatronBertPreTrainedModel, ) else: import sys __UpperCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from __future__ import annotations from decimal import Decimal from numpy import array def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[float]] ): lowerCAmelCase = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(_UpperCAmelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix lowerCAmelCase = float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError('This matrix has no inverse.' ) # Creates a copy of the matrix with swapped positions of the elements lowerCAmelCase = [[0.0, 0.0], [0.0, 0.0]] lowerCAmelCase ,lowerCAmelCase = matrix[1][1], matrix[0][0] lowerCAmelCase ,lowerCAmelCase = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(_UpperCAmelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(_UpperCAmelCase ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule lowerCAmelCase = float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError('This matrix has no inverse.' ) # Creating cofactor matrix lowerCAmelCase = [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] lowerCAmelCase = (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) lowerCAmelCase = -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )) ) lowerCAmelCase = (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) lowerCAmelCase = -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) lowerCAmelCase = -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) lowerCAmelCase = (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) lowerCAmelCase = -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) lowerCAmelCase = array(_UpperCAmelCase ) for i in range(3 ): for j in range(3 ): lowerCAmelCase = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix lowerCAmelCase = array(_UpperCAmelCase ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(_UpperCAmelCase ) # Calculate the inverse of the matrix return [[float(d(_UpperCAmelCase ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
"""simple docstring""" from PIL import Image def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Image , _UpperCAmelCase : float ): def brightness(_UpperCAmelCase : int ) -> float: return 128 + level + (c - 128) if not -255.0 <= level <= 255.0: raise ValueError('level must be between -255.0 (black) and 255.0 (white)' ) return img.point(_UpperCAmelCase ) if __name__ == "__main__": # Load image with Image.open('''image_data/lena.jpg''') as img: # Change brightness to 100 __UpperCamelCase : Dict = change_brightness(img, 100) brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase : Dict = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys __UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __UpperCamelCase : int = logging.get_logger(__name__) class a ( a__ ): snake_case__ = ['''pixel_values'''] def __init__( self , _snake_case = True , _snake_case = None , _snake_case = None , _snake_case = PILImageResampling.BILINEAR , _snake_case = True , _snake_case = 1 / 2_55 , _snake_case = True , _snake_case = None , _snake_case = None , **_snake_case , ): """simple docstring""" super().__init__(**_snake_case ) lowerCAmelCase = size if size is not None else {'shortest_edge': 3_84} lowerCAmelCase = get_size_dict(_snake_case , default_to_square=_snake_case ) lowerCAmelCase = do_resize lowerCAmelCase = size # Default value set here for backwards compatibility where the value in config is None lowerCAmelCase = crop_pct if crop_pct is not None else 2_24 / 2_56 lowerCAmelCase = resample lowerCAmelCase = do_rescale lowerCAmelCase = rescale_factor lowerCAmelCase = do_normalize lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case = PILImageResampling.BICUBIC , _snake_case = None , **_snake_case , ): """simple docstring""" lowerCAmelCase = get_size_dict(_snake_case , default_to_square=_snake_case ) if "shortest_edge" not in size: raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}' ) lowerCAmelCase = size['shortest_edge'] if shortest_edge < 3_84: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct lowerCAmelCase = int(shortest_edge / crop_pct ) lowerCAmelCase = get_resize_output_image_size(_snake_case , size=_snake_case , default_to_square=_snake_case ) lowerCAmelCase = resize(image=_snake_case , size=_snake_case , resample=_snake_case , data_format=_snake_case , **_snake_case ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=_snake_case , size=(shortest_edge, shortest_edge) , data_format=_snake_case , **_snake_case ) else: # warping (no cropping) when evaluated at 384 or larger return resize( _snake_case , size=(shortest_edge, shortest_edge) , resample=_snake_case , data_format=_snake_case , **_snake_case ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case , ): """simple docstring""" return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case = None , **_snake_case , ): """simple docstring""" return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case ) def UpperCamelCase__ ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = ChannelDimension.FIRST , **_snake_case , ): """simple docstring""" lowerCAmelCase = do_resize if do_resize is not None else self.do_resize lowerCAmelCase = crop_pct if crop_pct is not None else self.crop_pct lowerCAmelCase = resample if resample is not None else self.resample lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase = image_mean if image_mean is not None else self.image_mean lowerCAmelCase = image_std if image_std is not None else self.image_std lowerCAmelCase = size if size is not None else self.size lowerCAmelCase = get_size_dict(_snake_case , default_to_square=_snake_case ) lowerCAmelCase = make_list_of_images(_snake_case ) if not valid_images(_snake_case ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_resize and size["shortest_edge"] < 3_84 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
lowerCAmelCase = [to_numpy_array(_snake_case ) for image in images] if do_resize: lowerCAmelCase = [self.resize(image=_snake_case , size=_snake_case , crop_pct=_snake_case , resample=_snake_case ) for image in images] if do_rescale: lowerCAmelCase = [self.rescale(image=_snake_case , scale=_snake_case ) for image in images] if do_normalize: lowerCAmelCase = [self.normalize(image=_snake_case , mean=_snake_case , std=_snake_case ) for image in images] lowerCAmelCase = [to_channel_dimension_format(_snake_case , _snake_case ) for image in images] lowerCAmelCase = {'pixel_values': images} return BatchFeature(data=_snake_case , tensor_type=_snake_case )
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer __UpperCamelCase : Dict = logging.get_logger(__name__) __UpperCamelCase : str = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase : Optional[int] = { '''vocab_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''', '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-german-cased''': ( '''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json''' ), '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json''' ), }, } __UpperCamelCase : str = { '''distilbert-base-uncased''': 512, '''distilbert-base-uncased-distilled-squad''': 512, '''distilbert-base-cased''': 512, '''distilbert-base-cased-distilled-squad''': 512, '''distilbert-base-german-cased''': 512, '''distilbert-base-multilingual-cased''': 512, } __UpperCamelCase : Any = { '''distilbert-base-uncased''': {'''do_lower_case''': True}, '''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True}, '''distilbert-base-cased''': {'''do_lower_case''': False}, '''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False}, '''distilbert-base-german-cased''': {'''do_lower_case''': False}, '''distilbert-base-multilingual-cased''': {'''do_lower_case''': False}, } class a ( a__ ): snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_VOCAB_FILES_MAP snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = PRETRAINED_INIT_CONFIGURATION snake_case__ = ['''input_ids''', '''attention_mask'''] snake_case__ = DistilBertTokenizer def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ): """simple docstring""" super().__init__( _snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , 
tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , ) lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _snake_case ) != do_lower_case or normalizer_state.get('strip_accents' , _snake_case ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _snake_case ) != tokenize_chinese_chars ): lowerCAmelCase = getattr(_snake_case , normalizer_state.pop('type' ) ) lowerCAmelCase = do_lower_case lowerCAmelCase = strip_accents lowerCAmelCase = tokenize_chinese_chars lowerCAmelCase = normalizer_class(**_snake_case ) lowerCAmelCase = do_lower_case def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = self._tokenizer.model.save(_snake_case , name=_snake_case ) return tuple(_snake_case )
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device __UpperCamelCase : Union[str, Any] = False class a ( unittest.TestCase ): pass @nightly @require_torch_gpu class a ( unittest.TestCase ): def UpperCamelCase__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe.dual_guided( prompt='first prompt' , image=_snake_case , text_to_image_strength=0.75 , generator=_snake_case , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_snake_case ) lowerCAmelCase = VersatileDiffusionPipeline.from_pretrained(_snake_case , torch_dtype=torch.floataa ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = generator.manual_seed(0 ) lowerCAmelCase = pipe.dual_guided( prompt='first prompt' , image=_snake_case , text_to_image_strength=0.75 , generator=_snake_case , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = 'cyberpunk 2077' lowerCAmelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe.dual_guided( prompt=_snake_case , image=_snake_case , text_to_image_strength=0.75 , generator=_snake_case , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images lowerCAmelCase = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 lowerCAmelCase = 'A painting of a squirrel eating a burger ' lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe.text_to_image( prompt=_snake_case , generator=_snake_case , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images lowerCAmelCase = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 lowerCAmelCase = pipe.image_variation(_snake_case , generator=_snake_case , output_type='numpy' ).images lowerCAmelCase = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] ) assert np.abs(image_slice.flatten() - 
expected_slice ).max() < 1E-1
"""simple docstring""" from __future__ import annotations def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] | None = None ): lowerCAmelCase = word_bank or [] # create a table lowerCAmelCase = len(_UpperCAmelCase ) + 1 lowerCAmelCase = [] for _ in range(_UpperCAmelCase ): table.append([] ) # seed value lowerCAmelCase = [[]] # because empty string has empty combination # iterate through the indices for i in range(_UpperCAmelCase ): # condition if table[i] != []: for word in word_bank: # slice condition if target[i : i + len(_UpperCAmelCase )] == word: lowerCAmelCase = [ [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now,push that combination to the table[i+len(word)] table[i + len(_UpperCAmelCase )] += new_combinations # combinations are in reverse order so reverse for better output for combination in table[len(_UpperCAmelCase )]: combination.reverse() return table[len(_UpperCAmelCase )] if __name__ == "__main__": print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa'''])) print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t'''])) print( all_construct( '''hexagonosaurus''', ['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''], ) )
"""simple docstring""" from manim import * class a ( a__ ): def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = Rectangle(height=0.5 , width=0.5 ) lowerCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) lowerCAmelCase = [mem.copy() for i in range(6 )] lowerCAmelCase = [mem.copy() for i in range(6 )] lowerCAmelCase = VGroup(*_snake_case ).arrange(_snake_case , buff=0 ) lowerCAmelCase = VGroup(*_snake_case ).arrange(_snake_case , buff=0 ) lowerCAmelCase = VGroup(_snake_case , _snake_case ).arrange(_snake_case , buff=0 ) lowerCAmelCase = Text('CPU' , font_size=24 ) lowerCAmelCase = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_snake_case ) lowerCAmelCase = [mem.copy() for i in range(1 )] lowerCAmelCase = VGroup(*_snake_case ).arrange(_snake_case , buff=0 ) lowerCAmelCase = Text('GPU' , font_size=24 ) lowerCAmelCase = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case ) gpu.align_to(_snake_case , _snake_case ) gpu.set_x(gpu.get_x() - 1 ) self.add(_snake_case ) lowerCAmelCase = [mem.copy() for i in range(6 )] lowerCAmelCase = VGroup(*_snake_case ).arrange(_snake_case , buff=0 ) lowerCAmelCase = Text('Model' , font_size=24 ) lowerCAmelCase = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case ) model.move_to([3, -1.0, 0] ) self.play( Create(_snake_case , run_time=1 ) , Create(_snake_case , run_time=1 ) , Create(_snake_case , run_time=1 ) , ) lowerCAmelCase = MarkupText( F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , ) lowerCAmelCase = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCAmelCase = MarkupText( F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(_snake_case , run_time=2.5 ) , Write(_snake_case ) , Write(_snake_case ) ) self.add(_snake_case ) lowerCAmelCase = [] lowerCAmelCase = [] lowerCAmelCase = [] for i, rect in enumerate(_snake_case ): lowerCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_snake_case , opacity=0.7 ) cpu_target.move_to(_snake_case ) cpu_target.generate_target() lowerCAmelCase = 0.46 / 4 lowerCAmelCase = 0.46 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_snake_case ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target , direction=_snake_case , buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_snake_case , buff=0.0 ) cpu_targs.append(_snake_case ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_snake_case ) ) second_animations.append(MoveToTarget(_snake_case , run_time=1.5 ) ) self.play(*_snake_case ) self.play(*_snake_case ) self.wait()
"""simple docstring""" import re def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): if len(re.findall('[ATCG]' , _UpperCAmelCase ) ) != len(_UpperCAmelCase ): raise ValueError('Invalid Strand' ) return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class a ( unittest.TestCase ): def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = inspect.getfile(accelerate.test_utils ) lowerCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] ) lowerCAmelCase = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] ) lowerCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] ) @require_multi_gpu def UpperCamelCase__ ( self ): """simple docstring""" print(F'Found {torch.cuda.device_count()} devices.' ) lowerCAmelCase = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_snake_case , env=os.environ.copy() ) @require_multi_gpu def UpperCamelCase__ ( self ): """simple docstring""" print(F'Found {torch.cuda.device_count()} devices.' ) lowerCAmelCase = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path] print(F'Command: {cmd}' ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_snake_case , env=os.environ.copy() ) @require_multi_gpu def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_snake_case , env=os.environ.copy() ) @require_multi_gpu def UpperCamelCase__ ( self ): """simple docstring""" print(F'Found {torch.cuda.device_count()} devices, using 2 devices only' ) lowerCAmelCase = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ): execute_subprocess_async(_snake_case , env=os.environ.copy() ) if __name__ == "__main__": __UpperCamelCase : Optional[Any] = Accelerator() __UpperCamelCase : Optional[int] = (accelerator.state.process_index + 2, 10) __UpperCamelCase : Union[str, Any] = torch.randint(0, 10, shape).to(accelerator.device) __UpperCamelCase : List[Any] = '''''' __UpperCamelCase : str = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." __UpperCamelCase : Optional[int] = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." __UpperCamelCase : List[Any] = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
"""simple docstring""" import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () __UpperCamelCase : List[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). __UpperCamelCase : str = [0, 25, 50] __UpperCamelCase : int = [25, 50, 75] __UpperCamelCase : str = fuzz.membership.trimf(X, abca) __UpperCamelCase : Tuple = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. __UpperCamelCase : Dict = np.ones(75) __UpperCamelCase : str = np.zeros((75,)) # 1. Union = max(µA(x), µB(x)) __UpperCamelCase : Optional[Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) __UpperCamelCase : Dict = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) __UpperCamelCase : Dict = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) __UpperCamelCase : List[str] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] __UpperCamelCase : List[str] = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) __UpperCamelCase : Tuple = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] __UpperCamelCase : Union[str, Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] __UpperCamelCase : Dict = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title('''Young''') plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title('''Middle aged''') plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title('''union''') plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title('''intersection''') plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title('''complement_a''') plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title('''difference a/b''') plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title('''alg_sum''') plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title('''alg_product''') plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title('''bdd_sum''') plt.grid(True) plt.subplot(4, 3, 10) plt.plot(X, bdd_difference) plt.title('''bdd_difference''') plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
"""simple docstring""" import pprint import requests __UpperCamelCase : Any = '''https://zenquotes.io/api''' def _SCREAMING_SNAKE_CASE (): return requests.get(API_ENDPOINT_URL + '/today' ).json() def _SCREAMING_SNAKE_CASE (): return requests.get(API_ENDPOINT_URL + '/random' ).json() if __name__ == "__main__": __UpperCamelCase : Any = random_quotes() pprint.pprint(response)
"""simple docstring""" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[int] , _UpperCAmelCase : str ): lowerCAmelCase = int(_UpperCAmelCase ) # Initialize Result lowerCAmelCase = [] # Traverse through all denomination for denomination in reversed(_UpperCAmelCase ): # Find denominations while int(_UpperCAmelCase ) >= int(_UpperCAmelCase ): total_value -= int(_UpperCAmelCase ) answer.append(_UpperCAmelCase ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": __UpperCamelCase : Any = [] __UpperCamelCase : List[Any] = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): __UpperCamelCase : Any = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(f'''Denomination {i}: ''').strip())) __UpperCamelCase : int = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter __UpperCamelCase : List[str] = [1, 2, 5, 10, 20, 50, 100, 500, 2000] __UpperCamelCase : Any = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(f'''Following is minimal change for {value}: ''') __UpperCamelCase : List[str] = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase : Union[str, Any] = {'''configuration_ibert''': ['''IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''IBertConfig''', '''IBertOnnxConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[int] = [ '''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''IBertForMaskedLM''', '''IBertForMultipleChoice''', '''IBertForQuestionAnswering''', '''IBertForSequenceClassification''', '''IBertForTokenClassification''', '''IBertModel''', '''IBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys __UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class a : def __init__( self , _snake_case , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = 13 lowerCAmelCase = 7 lowerCAmelCase = True lowerCAmelCase = True lowerCAmelCase = True lowerCAmelCase = 99 lowerCAmelCase = 32 lowerCAmelCase = 2 lowerCAmelCase = 4 lowerCAmelCase = 37 lowerCAmelCase = 'gelu' lowerCAmelCase = 0.1 lowerCAmelCase = 0.1 lowerCAmelCase = 5_12 lowerCAmelCase = 16 lowerCAmelCase = 2 lowerCAmelCase = 0.02 lowerCAmelCase = 3 lowerCAmelCase = 4 lowerCAmelCase = None def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self ): """simple docstring""" ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = self.prepare_config_and_inputs() lowerCAmelCase = True lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFEsmModel(config=_snake_case ) lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} lowerCAmelCase = model(_snake_case ) lowerCAmelCase = [input_ids, input_mask] lowerCAmelCase = model(_snake_case ) lowerCAmelCase = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" 
lowerCAmelCase = True lowerCAmelCase = TFEsmModel(config=_snake_case ) lowerCAmelCase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'encoder_hidden_states': encoder_hidden_states, 'encoder_attention_mask': encoder_attention_mask, } lowerCAmelCase = model(_snake_case ) lowerCAmelCase = [input_ids, input_mask] lowerCAmelCase = model(_snake_case , encoder_hidden_states=_snake_case ) # Also check the case where encoder outputs are not passed lowerCAmelCase = model(_snake_case , attention_mask=_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFEsmForMaskedLM(config=_snake_case ) lowerCAmelCase = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = TFEsmForTokenClassification(config=_snake_case ) lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} lowerCAmelCase = model(_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class a ( a__ , a__ , unittest.TestCase ): snake_case__ = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) snake_case__ = ( { '''feature-extraction''': TFEsmModel, '''fill-mask''': TFEsmForMaskedLM, '''text-classification''': TFEsmForSequenceClassification, '''token-classification''': TFEsmForTokenClassification, '''zero-shot''': TFEsmForSequenceClassification, } if is_tf_available() else {} ) snake_case__ = False snake_case__ = False def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFEsmModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = TFEsmModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) @unittest.skip('Protein models do 
not support embedding resizing.' ) def UpperCamelCase__ ( self ): """simple docstring""" pass @unittest.skip('Protein models do not support embedding resizing.' ) def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase = model_class(_snake_case ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer lowerCAmelCase = model.get_bias() assert isinstance(_snake_case , _snake_case ) for k, v in name.items(): assert isinstance(_snake_case , tf.Variable ) else: lowerCAmelCase = model.get_output_embeddings() assert x is None lowerCAmelCase = model.get_bias() assert name is None @require_tf class a ( unittest.TestCase ): @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCAmelCase = model(_snake_case )[0] lowerCAmelCase = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) , _snake_case ) # compare the actual values for a slice. lowerCAmelCase = tf.constant( [ [ [8.921_518, -10.589_814, -6.4_671_307], [-6.3_967_156, -13.911_377, -1.1_211_915], [-7.781_247, -13.951_557, -3.740_592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) lowerCAmelCase = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowerCAmelCase = model(_snake_case )[0] # compare the actual values for a slice. lowerCAmelCase = tf.constant( [ [ [0.14_443_092, 0.54_125_327, 0.3_247_739], [0.30_340_484, 0.00_526_676, 0.31_077_722], [0.32_278_043, -0.24_987_096, 0.3_414_628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
"""simple docstring""" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : int ): lowerCAmelCase = len(_UpperCAmelCase ) lowerCAmelCase = [[0] * n for i in range(_UpperCAmelCase )] for i in range(_UpperCAmelCase ): lowerCAmelCase = y_points[i] for i in range(2 , _UpperCAmelCase ): for j in range(_UpperCAmelCase , _UpperCAmelCase ): lowerCAmelCase = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) __UpperCamelCase : Dict = '''▁''' __UpperCamelCase : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''} __UpperCamelCase : str = { '''sentencepiece_model_file''': '''sentencepiece.bpe.model''', '''vocab_file''': '''vocab.txt''', } __UpperCamelCase : Tuple = { '''vocab_file''': { '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''', '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''', }, '''sentencepiece_model_file''': { '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''', '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''', }, } __UpperCamelCase : Optional[Any] = { '''ernie-m-base''': 514, '''ernie-m-large''': 514, } __UpperCamelCase : str = { '''ernie-m-base''': {'''do_lower_case''': False}, '''ernie-m-large''': {'''do_lower_case''': False}, } class a ( a__ ): snake_case__ = ["input_ids"] snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_INIT_CONFIGURATION snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = PRETRAINED_VOCAB_FILES_MAP snake_case__ = RESOURCE_FILES_NAMES def __init__( self , _snake_case , _snake_case=None , _snake_case=False , _snake_case="utf8" , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case = None , **_snake_case , ): """simple docstring""" lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , vocab_file=_snake_case , encoding=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , ) lowerCAmelCase = do_lower_case lowerCAmelCase = sentencepiece_model_ckpt lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_snake_case ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: lowerCAmelCase = self.load_vocab(filepath=_snake_case ) else: lowerCAmelCase = {self.sp_model.id_to_piece(_snake_case ): id for id in range(self.sp_model.get_piece_size() )} lowerCAmelCase = {v: k for k, v in self.vocab.items()} def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if text is None: return None lowerCAmelCase = self.tokenize(_snake_case ) lowerCAmelCase ,lowerCAmelCase = '', [] for i, ch in enumerate(_snake_case ): if ch in self.SP_CHAR_MAPPING: lowerCAmelCase = self.SP_CHAR_MAPPING.get(_snake_case ) else: lowerCAmelCase = unicodedata.normalize('NFKC' , _snake_case ) if self.is_whitespace(_snake_case ): continue normalized_text += ch char_mapping.extend([i] * len(_snake_case ) ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = normalized_text, [], 0 if self.do_lower_case: lowerCAmelCase = text.lower() for token in split_tokens: if token[:1] == "▁": lowerCAmelCase = token[1:] lowerCAmelCase = text[offset:].index(_snake_case ) + offset lowerCAmelCase = start + len(_snake_case ) token_mapping.append((char_mapping[start], char_mapping[end - 
1] + 1) ) lowerCAmelCase = end return token_mapping @property def UpperCamelCase__ ( self ): """simple docstring""" return len(self.vocab ) def UpperCamelCase__ ( self ): """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self ): """simple docstring""" lowerCAmelCase = self.__dict__.copy() lowerCAmelCase = None return state def __setstate__( self , _snake_case ): """simple docstring""" lowerCAmelCase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): lowerCAmelCase = {} lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return "".join((self.SP_CHAR_MAPPING.get(_snake_case , _snake_case ) for c in text) ) def UpperCamelCase__ ( self , _snake_case , _snake_case=False , _snake_case=64 , _snake_case=0.1 ): """simple docstring""" if self.sp_model_kwargs.get('enable_sampling' ) is True: lowerCAmelCase = True if self.sp_model_kwargs.get('alpha' ) is not None: lowerCAmelCase = self.sp_model_kwargs.get('alpha' ) if self.sp_model_kwargs.get('nbest_size' ) is not None: lowerCAmelCase = self.sp_model_kwargs.get('nbest_size' ) if not enable_sampling: lowerCAmelCase = self.sp_model.EncodeAsPieces(_snake_case ) else: lowerCAmelCase = self.sp_model.SampleEncodeAsPieces(_snake_case , _snake_case , _snake_case ) lowerCAmelCase = [] for pi, piece in enumerate(_snake_case ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(_snake_case ) and pi != 0: new_pieces.append(_snake_case ) continue else: continue lowerCAmelCase = 0 for i, chunk in enumerate(_snake_case ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(_snake_case ) or self.is_punct(_snake_case ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(_snake_case ) lowerCAmelCase = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowerCAmelCase = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowerCAmelCase = i if len(_snake_case ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip() return out_string def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = self.convert_ids_to_tokens(_snake_case ) lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip() return out_string def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return self.reverse_vocab.get(_snake_case , self.unk_token ) def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] lowerCAmelCase = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + 
[(0, 0)] def UpperCamelCase__ ( self , _snake_case , _snake_case=None , _snake_case=False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1] return [1] + ([0] * len(_snake_case )) + [1] def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" if token_ids_a is None: # [CLS] X [SEP] return (len(_snake_case ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(_snake_case ) + 1) + [1] * (len(_snake_case ) + 3) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if "\u4e00" <= char <= "\u9fff": return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if char in ",;:.?!~,;:。?!《》【】": return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(_snake_case ) == 1: lowerCAmelCase = unicodedata.category(_snake_case ) if cat == "Zs": return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = {} with io.open(_snake_case , 'r' , encoding='utf-8' ) as f: for index, line in enumerate(_snake_case ): lowerCAmelCase = line.rstrip('\n' ) lowerCAmelCase = int(_snake_case ) return token_to_idx def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = 0 if os.path.isdir(_snake_case ): lowerCAmelCase = os.path.join( _snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) else: lowerCAmelCase = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(_snake_case , 'w' , encoding='utf-8' ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda _snake_case : kv[1] ): if index != token_index: logger.warning( F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.' ' Please check that the vocabulary is not corrupted!' ) lowerCAmelCase = token_index writer.write(token + '\n' ) index += 1 lowerCAmelCase = os.path.join(_snake_case , 'sentencepiece.bpe.model' ) with open(_snake_case , 'wb' ) as fi: lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(_snake_case ) return (vocab_file,)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __UpperCamelCase : Dict = {'''configuration_encoder_decoder''': ['''EncoderDecoderConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = ['''EncoderDecoderModel'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[int] = ['''TFEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Dict = ['''FlaxEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys __UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME __UpperCamelCase : int = ['''small''', '''medium''', '''large'''] __UpperCamelCase : str = '''lm_head.decoder.weight''' __UpperCamelCase : Dict = '''lm_head.weight''' def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ): lowerCAmelCase = torch.load(_UpperCAmelCase ) lowerCAmelCase = d.pop(_UpperCAmelCase ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) torch.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ) if __name__ == "__main__": __UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--dialogpt_path''', default='''.''', type=str) __UpperCamelCase : Optional[int] = parser.parse_args() for MODEL in DIALOGPT_MODELS: __UpperCamelCase : Dict = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''') __UpperCamelCase : str = f'''./DialoGPT-{MODEL}''' convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
"""simple docstring""" import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) __UpperCamelCase : Dict = '''▁''' __UpperCamelCase : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''} __UpperCamelCase : str = { '''sentencepiece_model_file''': '''sentencepiece.bpe.model''', '''vocab_file''': '''vocab.txt''', } __UpperCamelCase : Tuple = { '''vocab_file''': { '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''', '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''', }, '''sentencepiece_model_file''': { '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''', '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''', }, } __UpperCamelCase : Optional[Any] = { '''ernie-m-base''': 514, '''ernie-m-large''': 514, } __UpperCamelCase : str = { '''ernie-m-base''': {'''do_lower_case''': False}, '''ernie-m-large''': {'''do_lower_case''': False}, } class a ( a__ ): snake_case__ = ["input_ids"] snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_INIT_CONFIGURATION snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = PRETRAINED_VOCAB_FILES_MAP snake_case__ = RESOURCE_FILES_NAMES def __init__( self , _snake_case , _snake_case=None , _snake_case=False , _snake_case="utf8" , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case = None , **_snake_case , ): """simple docstring""" lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , vocab_file=_snake_case , encoding=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , ) lowerCAmelCase = do_lower_case lowerCAmelCase = sentencepiece_model_ckpt lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_snake_case ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: lowerCAmelCase = self.load_vocab(filepath=_snake_case ) else: lowerCAmelCase = {self.sp_model.id_to_piece(_snake_case ): id for id in range(self.sp_model.get_piece_size() )} lowerCAmelCase = {v: k for k, v in self.vocab.items()} def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if text is None: return None lowerCAmelCase = self.tokenize(_snake_case ) lowerCAmelCase ,lowerCAmelCase = '', [] for i, ch in enumerate(_snake_case ): if ch in self.SP_CHAR_MAPPING: lowerCAmelCase = self.SP_CHAR_MAPPING.get(_snake_case ) else: lowerCAmelCase = unicodedata.normalize('NFKC' , _snake_case ) if self.is_whitespace(_snake_case ): continue normalized_text += ch char_mapping.extend([i] * len(_snake_case ) ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = normalized_text, [], 0 if self.do_lower_case: lowerCAmelCase = text.lower() for token in split_tokens: if token[:1] == "▁": lowerCAmelCase = token[1:] lowerCAmelCase = text[offset:].index(_snake_case ) + offset lowerCAmelCase = start + len(_snake_case ) token_mapping.append((char_mapping[start], char_mapping[end - 
1] + 1) ) lowerCAmelCase = end return token_mapping @property def UpperCamelCase__ ( self ): """simple docstring""" return len(self.vocab ) def UpperCamelCase__ ( self ): """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self ): """simple docstring""" lowerCAmelCase = self.__dict__.copy() lowerCAmelCase = None return state def __setstate__( self , _snake_case ): """simple docstring""" lowerCAmelCase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): lowerCAmelCase = {} lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return "".join((self.SP_CHAR_MAPPING.get(_snake_case , _snake_case ) for c in text) ) def UpperCamelCase__ ( self , _snake_case , _snake_case=False , _snake_case=64 , _snake_case=0.1 ): """simple docstring""" if self.sp_model_kwargs.get('enable_sampling' ) is True: lowerCAmelCase = True if self.sp_model_kwargs.get('alpha' ) is not None: lowerCAmelCase = self.sp_model_kwargs.get('alpha' ) if self.sp_model_kwargs.get('nbest_size' ) is not None: lowerCAmelCase = self.sp_model_kwargs.get('nbest_size' ) if not enable_sampling: lowerCAmelCase = self.sp_model.EncodeAsPieces(_snake_case ) else: lowerCAmelCase = self.sp_model.SampleEncodeAsPieces(_snake_case , _snake_case , _snake_case ) lowerCAmelCase = [] for pi, piece in enumerate(_snake_case ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(_snake_case ) and pi != 0: new_pieces.append(_snake_case ) continue else: continue lowerCAmelCase = 0 for i, chunk in enumerate(_snake_case ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(_snake_case ) or self.is_punct(_snake_case ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(_snake_case ) lowerCAmelCase = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowerCAmelCase = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowerCAmelCase = i if len(_snake_case ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip() return out_string def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = self.convert_ids_to_tokens(_snake_case ) lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip() return out_string def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return self.reverse_vocab.get(_snake_case , self.unk_token ) def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] lowerCAmelCase = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + 
[(0, 0)] def UpperCamelCase__ ( self , _snake_case , _snake_case=None , _snake_case=False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1] return [1] + ([0] * len(_snake_case )) + [1] def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" if token_ids_a is None: # [CLS] X [SEP] return (len(_snake_case ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(_snake_case ) + 1) + [1] * (len(_snake_case ) + 3) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if "\u4e00" <= char <= "\u9fff": return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if char in ",;:.?!~,;:。?!《》【】": return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(_snake_case ) == 1: lowerCAmelCase = unicodedata.category(_snake_case ) if cat == "Zs": return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = {} with io.open(_snake_case , 'r' , encoding='utf-8' ) as f: for index, line in enumerate(_snake_case ): lowerCAmelCase = line.rstrip('\n' ) lowerCAmelCase = int(_snake_case ) return token_to_idx def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = 0 if os.path.isdir(_snake_case ): lowerCAmelCase = os.path.join( _snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) else: lowerCAmelCase = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(_snake_case , 'w' , encoding='utf-8' ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda _snake_case : kv[1] ): if index != token_index: logger.warning( F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.' ' Please check that the vocabulary is not corrupted!' ) lowerCAmelCase = token_index writer.write(token + '\n' ) index += 1 lowerCAmelCase = os.path.join(_snake_case , 'sentencepiece.bpe.model' ) with open(_snake_case , 'wb' ) as fi: lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(_snake_case ) return (vocab_file,)
"""simple docstring""" __UpperCamelCase : Dict = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} __UpperCamelCase : str = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ): lowerCAmelCase = True lowerCAmelCase = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) order.append(_UpperCAmelCase ) return order def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ): lowerCAmelCase = True lowerCAmelCase = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return component def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] ): lowerCAmelCase = len(_UpperCAmelCase ) * [False] lowerCAmelCase = {vert: [] for vert in range(len(_UpperCAmelCase ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(_UpperCAmelCase ) lowerCAmelCase = [] for i, was_visited in enumerate(_UpperCAmelCase ): if not was_visited: order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = [] lowerCAmelCase = len(_UpperCAmelCase ) * [False] for i in range(len(_UpperCAmelCase ) ): lowerCAmelCase = order[len(_UpperCAmelCase ) - i - 1] if not visited[vert]: lowerCAmelCase = find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) components_list.append(_UpperCAmelCase ) return components_list
"""simple docstring""" from math import loga def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ): if a < 0: raise ValueError('Input value must be a positive integer' ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise TypeError('Input value must be a \'int\' type' ) return 0 if (a == 0) else int(loga(a & -a ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) @dataclass class a : snake_case__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} ) snake_case__ = field( metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} ) snake_case__ = field( default=1_2_8 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) snake_case__ = field( default=a__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.task_name.lower() class a ( a__ ): snake_case__ = '''train''' snake_case__ = '''dev''' snake_case__ = '''test''' class a ( a__ ): snake_case__ = 42 snake_case__ = 42 snake_case__ = 42 def __init__( self , _snake_case , _snake_case , _snake_case = None , _snake_case = Split.train , _snake_case = None , ): """simple docstring""" warnings.warn( 'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets ' 'library. You can have a look at this example script for pointers: ' 'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , _snake_case , ) lowerCAmelCase = args lowerCAmelCase = glue_processors[args.task_name]() lowerCAmelCase = glue_output_modes[args.task_name] if isinstance(_snake_case , _snake_case ): try: lowerCAmelCase = Split[mode] except KeyError: raise KeyError('mode is not a valid split name' ) # Load data features from cache or dataset file lowerCAmelCase = os.path.join( cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , ) lowerCAmelCase = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) lowerCAmelCase ,lowerCAmelCase = label_list[2], label_list[1] lowerCAmelCase = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
lowerCAmelCase = cached_features_file + '.lock' with FileLock(_snake_case ): if os.path.exists(_snake_case ) and not args.overwrite_cache: lowerCAmelCase = time.time() lowerCAmelCase = torch.load(_snake_case ) logger.info( F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start ) else: logger.info(F'Creating features from dataset file at {args.data_dir}' ) if mode == Split.dev: lowerCAmelCase = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: lowerCAmelCase = self.processor.get_test_examples(args.data_dir ) else: lowerCAmelCase = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: lowerCAmelCase = examples[:limit_length] lowerCAmelCase = glue_convert_examples_to_features( _snake_case , _snake_case , max_length=args.max_seq_length , label_list=_snake_case , output_mode=self.output_mode , ) lowerCAmelCase = time.time() torch.save(self.features , _snake_case ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' ) def __len__( self ): """simple docstring""" return len(self.features ) def __getitem__( self , _snake_case ): """simple docstring""" return self.features[i] def UpperCamelCase__ ( self ): """simple docstring""" return self.label_list
309
1
"""simple docstring""" import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class a ( a__ ): def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" super().__init__() lowerCAmelCase = value_function lowerCAmelCase = unet lowerCAmelCase = scheduler lowerCAmelCase = env lowerCAmelCase = env.get_dataset() lowerCAmelCase = {} for key in self.data.keys(): try: lowerCAmelCase = self.data[key].mean() except: # noqa: E722 pass lowerCAmelCase = {} for key in self.data.keys(): try: lowerCAmelCase = self.data[key].std() except: # noqa: E722 pass lowerCAmelCase = env.observation_space.shape[0] lowerCAmelCase = env.action_space.shape[0] def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" return (x_in - self.means[key]) / self.stds[key] def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" return x_in * self.stds[key] + self.means[key] def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if type(_snake_case ) is dict: return {k: self.to_torch(_snake_case ) for k, v in x_in.items()} elif torch.is_tensor(_snake_case ): return x_in.to(self.unet.device ) return torch.tensor(_snake_case , device=self.unet.device ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case ): """simple docstring""" for key, val in cond.items(): lowerCAmelCase = val.clone() return x_in def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = x.shape[0] lowerCAmelCase = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model lowerCAmelCase = torch.full((batch_size,) , _snake_case , device=self.unet.device , dtype=torch.long ) for _ in range(_snake_case ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models lowerCAmelCase = self.value_function(x.permute(0 , 2 , 1 ) , _snake_case ).sample lowerCAmelCase = torch.autograd.grad([y.sum()] , [x] )[0] lowerCAmelCase = self.scheduler._get_variance(_snake_case ) lowerCAmelCase = torch.exp(0.5 * posterior_variance ) lowerCAmelCase = model_std * grad lowerCAmelCase = 0 lowerCAmelCase = x.detach() lowerCAmelCase = x + scale * grad lowerCAmelCase = self.reset_xa(_snake_case , _snake_case , self.action_dim ) lowerCAmelCase = self.unet(x.permute(0 , 2 , 1 ) , _snake_case ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg lowerCAmelCase = self.scheduler.step(_snake_case , _snake_case , _snake_case , predict_epsilon=_snake_case )['prev_sample'] # apply conditions to the trajectory (set the initial state) lowerCAmelCase = self.reset_xa(_snake_case , _snake_case , self.action_dim ) lowerCAmelCase = self.to_torch(_snake_case ) return x, y def __call__( self , _snake_case , _snake_case=64 , _snake_case=32 , _snake_case=2 , _snake_case=0.1 ): """simple docstring""" lowerCAmelCase = self.normalize(_snake_case , 'observations' ) lowerCAmelCase = obs[None].repeat(_snake_case , axis=0 ) lowerCAmelCase = {0: self.to_torch(_snake_case )} lowerCAmelCase = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) lowerCAmelCase = randn_tensor(_snake_case , device=self.unet.device ) lowerCAmelCase = self.reset_xa(_snake_case , _snake_case , 
self.action_dim ) lowerCAmelCase = self.to_torch(_snake_case ) # run the diffusion process lowerCAmelCase ,lowerCAmelCase = self.run_diffusion(_snake_case , _snake_case , _snake_case , _snake_case ) # sort output trajectories by value lowerCAmelCase = y.argsort(0 , descending=_snake_case ).squeeze() lowerCAmelCase = x[sorted_idx] lowerCAmelCase = sorted_values[:, :, : self.action_dim] lowerCAmelCase = actions.detach().cpu().numpy() lowerCAmelCase = self.de_normalize(_snake_case , key='actions' ) # select the action with the highest value if y is not None: lowerCAmelCase = 0 else: # if we didn't run value guiding, select a random action lowerCAmelCase = np.random.randint(0 , _snake_case ) lowerCAmelCase = denorm_actions[selected_index, 0] return denorm_actions
"""simple docstring""" import os from collections.abc import Iterator def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ): for dir_path, dir_names, filenames in os.walk(_UpperCAmelCase ): lowerCAmelCase = [d for d in dir_names if d != 'scripts' and d[0] not in '._'] for filename in filenames: if filename == "__init__.py": continue if os.path.splitext(_UpperCAmelCase )[1] in (".py", ".ipynb"): yield os.path.join(_UpperCAmelCase , _UpperCAmelCase ).lstrip('./' ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ): return F'{i * " "}*' if i else "\n##" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ): lowerCAmelCase = old_path.split(os.sep ) for i, new_part in enumerate(new_path.split(os.sep ) ): if (i + 1 > len(_UpperCAmelCase ) or old_parts[i] != new_part) and new_part: print(F'{md_prefix(_UpperCAmelCase )} {new_part.replace("_" , " " ).title()}' ) return new_path def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ): lowerCAmelCase = '' for filepath in sorted(good_file_paths(_UpperCAmelCase ) ): lowerCAmelCase ,lowerCAmelCase = os.path.split(_UpperCAmelCase ) if filepath != old_path: lowerCAmelCase = print_path(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = (filepath.count(os.sep ) + 1) if filepath else 0 lowerCAmelCase = F'{filepath}/{filename}'.replace(' ' , '%20' ) lowerCAmelCase = os.path.splitext(filename.replace('_' , ' ' ).title() )[0] print(F'{md_prefix(_UpperCAmelCase )} [{filename}]({url})' ) if __name__ == "__main__": print_directory_md('''.''')
"""simple docstring""" import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor __UpperCamelCase : Optional[int] = logging.get_logger(__name__) class a ( a__ ): def __init__( self , *_snake_case , **_snake_case ): """simple docstring""" warnings.warn( 'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use DonutImageProcessor instead.' , _snake_case , ) super().__init__(*_snake_case , **_snake_case )
"""simple docstring""" import os from datetime import datetime as dt from github import Github __UpperCamelCase : int = [ '''good first issue''', '''good second issue''', '''good difficult issue''', '''enhancement''', '''new pipeline/model''', '''new scheduler''', '''wip''', ] def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = Github(os.environ['GITHUB_TOKEN'] ) lowerCAmelCase = g.get_repo('huggingface/diffusers' ) lowerCAmelCase = repo.get_issues(state='open' ) for issue in open_issues: lowerCAmelCase = sorted(issue.get_comments() , key=lambda _UpperCAmelCase : i.created_at , reverse=_UpperCAmelCase ) lowerCAmelCase = comments[0] if len(_UpperCAmelCase ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state='closed' ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state='open' ) issue.remove_from_labels('stale' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( 'This issue has been automatically marked as stale because it has not had ' 'recent activity. If you think this still needs to be addressed ' 'please comment on this thread.\n\nPlease note that issues that do not follow the ' '[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) ' 'are likely to be ignored.' ) issue.add_to_labels('stale' ) if __name__ == "__main__": main()
"""simple docstring""" import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class a : def __init__( self , _snake_case , _snake_case=13 , _snake_case=32 , _snake_case=2 , _snake_case=3 , _snake_case=16 , _snake_case=[1, 2, 1] , _snake_case=[2, 2, 4] , _snake_case=2 , _snake_case=2.0 , _snake_case=True , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.1 , _snake_case="gelu" , _snake_case=False , _snake_case=True , _snake_case=0.02 , _snake_case=1E-5 , _snake_case=True , _snake_case=None , _snake_case=True , _snake_case=10 , _snake_case=8 , _snake_case=["stage1", "stage2", "stage3"] , _snake_case=[1, 2, 3] , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = image_size lowerCAmelCase = patch_size lowerCAmelCase = num_channels lowerCAmelCase = embed_dim lowerCAmelCase = depths lowerCAmelCase = num_heads lowerCAmelCase = window_size lowerCAmelCase = mlp_ratio lowerCAmelCase = qkv_bias lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = drop_path_rate lowerCAmelCase = hidden_act lowerCAmelCase = use_absolute_embeddings lowerCAmelCase = patch_norm lowerCAmelCase = layer_norm_eps lowerCAmelCase = initializer_range lowerCAmelCase = is_training lowerCAmelCase = scope lowerCAmelCase = use_labels lowerCAmelCase = type_sequence_label_size lowerCAmelCase = encoder_stride lowerCAmelCase = out_features lowerCAmelCase = out_indices def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = self.get_config() return config, pixel_values, labels def UpperCamelCase__ ( self ): """simple docstring""" return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = MaskFormerSwinModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case ) lowerCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCAmelCase = 
int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = MaskFormerSwinBackbone(config=_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(_snake_case ): lowerCAmelCase = ['stem'] lowerCAmelCase = MaskFormerSwinBackbone(config=_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = config_and_inputs lowerCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class a ( a__ , a__ , unittest.TestCase ): snake_case__ = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) snake_case__ = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {} snake_case__ = False snake_case__ = False snake_case__ = False snake_case__ = False snake_case__ = False def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = MaskFormerSwinModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' ) ) def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase__ ( self ): """simple docstring""" return def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_snake_case ) @unittest.skip('Swin does not use inputs_embeds' ) def UpperCamelCase__ ( self ): """simple docstring""" pass @unittest.skip('Swin does not support feedforward chunking' ) def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase = model_class(_snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase = model_class(_snake_case ) lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase = [*signature.parameters.keys()] lowerCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , _snake_case ) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' ) def UpperCamelCase__ ( self ): """simple docstring""" pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' ) def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowerCAmelCase = model(**self._prepare_for_class(_snake_case , _snake_case ) ) lowerCAmelCase = outputs.hidden_states lowerCAmelCase = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_snake_case ) , _snake_case ) # Swin has a different seq_length lowerCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowerCAmelCase = True self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , _snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase = True self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , _snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = 3 lowerCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowerCAmelCase = True self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase = True self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , (padded_height, padded_width) ) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' ) def UpperCamelCase__ ( self ): """simple docstring""" pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def 
UpperCamelCase__ ( self ): """simple docstring""" pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(_snake_case ): lowerCAmelCase = 0 return t def check_equivalence(_snake_case , _snake_case , _snake_case , _snake_case={} ): with torch.no_grad(): lowerCAmelCase = model(**_snake_case , return_dict=_snake_case , **_snake_case ) lowerCAmelCase = model(**_snake_case , return_dict=_snake_case , **_snake_case ).to_tuple() def recursive_check(_snake_case , _snake_case ): if isinstance(_snake_case , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_snake_case , _snake_case ): recursive_check(_snake_case , _snake_case ) elif isinstance(_snake_case , _snake_case ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(_snake_case , _snake_case ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(_snake_case ) , set_nan_tensor_to_zero(_snake_case ) , atol=1E-5 ) , msg=( 'Tuple and dict output are not equal. Difference:' F' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:' F' {torch.isnan(_snake_case ).any()} and `inf`: {torch.isinf(_snake_case )}. Dict has' F' `nan`: {torch.isnan(_snake_case ).any()} and `inf`: {torch.isinf(_snake_case )}.' ) , ) recursive_check(_snake_case , _snake_case ) for model_class in self.all_model_classes: lowerCAmelCase = model_class(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = self._prepare_for_class(_snake_case , _snake_case ) lowerCAmelCase = self._prepare_for_class(_snake_case , _snake_case ) check_equivalence(_snake_case , _snake_case , _snake_case ) lowerCAmelCase = self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case ) lowerCAmelCase = self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case ) check_equivalence(_snake_case , _snake_case , _snake_case ) lowerCAmelCase = self._prepare_for_class(_snake_case , _snake_case ) lowerCAmelCase = self._prepare_for_class(_snake_case , _snake_case ) check_equivalence(_snake_case , _snake_case , _snake_case , {'output_hidden_states': True} ) lowerCAmelCase = self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case ) lowerCAmelCase = self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case ) check_equivalence(_snake_case , _snake_case , _snake_case , {'output_hidden_states': True} ) @require_torch class a ( unittest.TestCase , a__ ): snake_case__ = (MaskFormerSwinBackbone,) if is_torch_available() else () snake_case__ = MaskFormerSwinConfig def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = MaskFormerSwinModelTester(self ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = inputs_dict['pixel_values'].shape[0] for backbone_class in self.all_model_classes: lowerCAmelCase = backbone_class(_snake_case ) backbone.to(_snake_case ) backbone.eval() lowerCAmelCase = backbone(**_snake_case ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , _snake_case ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, 
n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True lowerCAmelCase = backbone(**_snake_case , output_hidden_states=_snake_case ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: lowerCAmelCase = backbone(**_snake_case , output_attentions=_snake_case ) self.assertIsNotNone(outputs.attentions )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __UpperCamelCase : Any = { '''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''], '''processing_layoutlmv2''': ['''LayoutLMv2Processor'''], '''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = ['''LayoutLMv2TokenizerFast'''] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[int] = ['''LayoutLMv2FeatureExtractor'''] __UpperCamelCase : Optional[int] = ['''LayoutLMv2ImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Any = [ '''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LayoutLMv2ForQuestionAnswering''', '''LayoutLMv2ForSequenceClassification''', '''LayoutLMv2ForTokenClassification''', '''LayoutLMv2Layer''', '''LayoutLMv2Model''', '''LayoutLMv2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor from ..utils import is_datasets_available from .base import PipelineTool if is_datasets_available(): from datasets import load_dataset class a ( a__ ): snake_case__ = '''microsoft/speecht5_tts''' snake_case__ = ( '''This is a tool that reads an English text out loud. It takes an input named `text` which should contain the ''' '''text to read (in English) and returns a waveform object containing the sound.''' ) snake_case__ = '''text_reader''' snake_case__ = SpeechTaProcessor snake_case__ = SpeechTaForTextToSpeech snake_case__ = SpeechTaHifiGan snake_case__ = ['''text'''] snake_case__ = ['''audio'''] def UpperCamelCase__ ( self ): """simple docstring""" if self.post_processor is None: lowerCAmelCase = 'microsoft/speecht5_hifigan' super().setup() def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" lowerCAmelCase = self.pre_processor(text=_snake_case , return_tensors='pt' , truncation=_snake_case ) if speaker_embeddings is None: if not is_datasets_available(): raise ImportError('Datasets needs to be installed if not passing speaker embeddings.' ) lowerCAmelCase = load_dataset('Matthijs/cmu-arctic-xvectors' , split='validation' ) lowerCAmelCase = torch.tensor(embeddings_dataset[73_05]['xvector'] ).unsqueeze(0 ) return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings} def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" with torch.no_grad(): return self.model.generate_speech(**_snake_case ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" with torch.no_grad(): return self.post_processor(_snake_case ).cpu().detach()
"""simple docstring""" import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) class a ( a__ ): def __init__( self , *_snake_case , **_snake_case ): """simple docstring""" warnings.warn( 'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use PoolFormerImageProcessor instead.' , _snake_case , ) super().__init__(*_snake_case , **_snake_case )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase : Any = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys __UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. __UpperCamelCase : str = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. __UpperCamelCase : Optional[Any] = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. __UpperCamelCase : Dict = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ): lowerCAmelCase = len([g for position, g in enumerate(_UpperCAmelCase ) if g == main_target[position]] ) return (item, float(_UpperCAmelCase )) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ): lowerCAmelCase = random.randint(0 , len(_UpperCAmelCase ) - 1 ) lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:] lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] ): lowerCAmelCase = list(_UpperCAmelCase ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: lowerCAmelCase = random.choice(_UpperCAmelCase ) return "".join(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : tuple[str, float] , _UpperCAmelCase : list[tuple[str, float]] , _UpperCAmelCase : list[str] , ): lowerCAmelCase = [] # Generate more children proportionally to the fitness score. lowerCAmelCase = int(parent_a[1] * 100 ) + 1 lowerCAmelCase = 10 if child_n >= 10 else child_n for _ in range(_UpperCAmelCase ): lowerCAmelCase = population_score[random.randint(0 , _UpperCAmelCase )][0] lowerCAmelCase ,lowerCAmelCase = crossover(parent_a[0] , _UpperCAmelCase ) # Append new string to the population list. pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) ) pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) ) return pop def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] , _UpperCAmelCase : bool = True ): # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: lowerCAmelCase = F'{N_POPULATION} must be bigger than {N_SELECTED}' raise ValueError(_UpperCAmelCase ) # Verify that the target contains no genes besides the ones inside genes variable. lowerCAmelCase = sorted({c for c in target if c not in genes} ) if not_in_genes_list: lowerCAmelCase = F'{not_in_genes_list} is not in genes list, evolution cannot converge' raise ValueError(_UpperCAmelCase ) # Generate random starting population. lowerCAmelCase = [] for _ in range(_UpperCAmelCase ): population.append(''.join([random.choice(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) )] ) ) # Just some logs to know what the algorithms is doing. lowerCAmelCase ,lowerCAmelCase = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(_UpperCAmelCase ) # Random population created. Now it's time to evaluate. 
# Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. lowerCAmelCase = [evaluate(_UpperCAmelCase , _UpperCAmelCase ) for item in population] # Check if there is a matching evolution. lowerCAmelCase = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] , reverse=_UpperCAmelCase ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F'\nGeneration: {generation}' F'\nTotal Population:{total_population}' F'\nBest score: {population_score[0][1]}' F'\nBest string: {population_score[0][0]}' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. lowerCAmelCase = population[: int(N_POPULATION / 3 )] population.clear() population.extend(_UpperCAmelCase ) # Normalize population score to be between 0 and 1. lowerCAmelCase = [ (item, score / len(_UpperCAmelCase )) for item, score in population_score ] # This is selection for i in range(_UpperCAmelCase ): population.extend(select(population_score[int(_UpperCAmelCase )] , _UpperCAmelCase , _UpperCAmelCase ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(_UpperCAmelCase ) > N_POPULATION: break if __name__ == "__main__": __UpperCamelCase : Tuple = ( '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!''' ) __UpperCamelCase : str = list( ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm''' '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\''' ) __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase : Dict = basic(target_str, genes_list) print( f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase : Optional[Any] = { '''configuration_x_clip''': [ '''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XCLIPConfig''', '''XCLIPTextConfig''', '''XCLIPVisionConfig''', ], '''processing_x_clip''': ['''XCLIPProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Union[str, Any] = [ '''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XCLIPModel''', '''XCLIPPreTrainedModel''', '''XCLIPTextModel''', '''XCLIPVisionModel''', ] if TYPE_CHECKING: from .configuration_x_clip import ( XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig, ) from .processing_x_clip import XCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_x_clip import ( XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, XCLIPModel, XCLIPPreTrainedModel, XCLIPTextModel, XCLIPVisionModel, ) else: import sys __UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import copy import os import cva import numpy as np from matplotlib import pyplot as plt class a : def __init__( self ): """simple docstring""" lowerCAmelCase = '' lowerCAmelCase = '' lowerCAmelCase = [] lowerCAmelCase = 0 lowerCAmelCase = 2_56 lowerCAmelCase = 0 lowerCAmelCase = 0 lowerCAmelCase = 0 lowerCAmelCase = 0 def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = cva.imread(_snake_case , 0 ) lowerCAmelCase = copy.deepcopy(self.img ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='x' ) lowerCAmelCase = np.sum(_snake_case ) for i in range(len(_snake_case ) ): lowerCAmelCase = x[i] / self.k self.sk += prk lowerCAmelCase = (self.L - 1) * self.sk if self.rem != 0: lowerCAmelCase = int(last % last ) lowerCAmelCase = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(_snake_case ) lowerCAmelCase = int(np.ma.count(self.img ) / self.img[1].size ) lowerCAmelCase = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): lowerCAmelCase = self.img[j][i] if num != self.last_list[num]: lowerCAmelCase = self.last_list[num] cva.imwrite('output_data/output.jpg' , self.img ) def UpperCamelCase__ ( self ): """simple docstring""" plt.hist(self.img.ravel() , 2_56 , [0, 2_56] ) def UpperCamelCase__ ( self ): """simple docstring""" cva.imshow('Output-Image' , self.img ) cva.imshow('Input-Image' , self.original_image ) cva.waitKey(50_00 ) cva.destroyAllWindows() if __name__ == "__main__": __UpperCamelCase : int = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''') __UpperCamelCase : List[Any] = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
"""simple docstring""" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : List[Any] ): lowerCAmelCase = [0 for i in range(r + 1 )] # nc0 = 1 lowerCAmelCase = 1 for i in range(1 , n + 1 ): # to compute current row from previous row. lowerCAmelCase = min(_UpperCAmelCase , _UpperCAmelCase ) while j > 0: c[j] += c[j - 1] j -= 1 return c[r] print(binomial_coefficient(n=10, r=5))
"""simple docstring""" import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( 'split_dict' , [ SplitDict(), SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ), SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ), SplitDict({'train': SplitInfo()} ), ] , ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : SplitDict ): lowerCAmelCase = split_dict._to_yaml_list() assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ) lowerCAmelCase = SplitDict._from_yaml_list(_UpperCAmelCase ) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump lowerCAmelCase = None # the split name of split_dict takes over the name of the split info object lowerCAmelCase = split_name assert split_dict == reloaded @pytest.mark.parametrize( 'split_info' , [SplitInfo(), SplitInfo(dataset_name=_UpperCAmelCase ), SplitInfo(dataset_name='my_dataset' )] ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] ): # For backward compatibility, we need asdict(split_dict) to return split info dictrionaries with the "dataset_name" # field even if it's deprecated. This way old versionso of `datasets` can still reload dataset_infos.json files lowerCAmelCase = asdict(SplitDict({'train': split_info} ) ) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png' lowerCAmelCase = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert('RGB' ) return image def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any ): lowerCAmelCase = [] # fmt: off # vision encoder rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') ) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') ) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') ) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') ) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') ) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') ) rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') ) rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') ) rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') ) rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') ) rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) ) rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') ) rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') ) rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') ) rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') ) rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') ) # QFormer rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') ) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') ) # fmt: on return rename_keys def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] ): lowerCAmelCase = dct.pop(_UpperCAmelCase ) lowerCAmelCase = val def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ): for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases lowerCAmelCase = state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' ) lowerCAmelCase = state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' ) 
# next, set bias in the state dict lowerCAmelCase = torch.cat((q_bias, torch.zeros_like(_UpperCAmelCase , requires_grad=_UpperCAmelCase ), v_bias) ) lowerCAmelCase = qkv_bias def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : str ): lowerCAmelCase = 364 if 'coco' in model_name else 224 lowerCAmelCase = BlipaVisionConfig(image_size=_UpperCAmelCase ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: lowerCAmelCase = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=_UpperCAmelCase ).to_dict() elif "opt-6.7b" in model_name: lowerCAmelCase = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=_UpperCAmelCase ).to_dict() elif "t5-xl" in model_name: lowerCAmelCase = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: lowerCAmelCase = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() lowerCAmelCase = BlipaConfig(vision_config=_UpperCAmelCase , text_config=_UpperCAmelCase ) return config, image_size @torch.no_grad() def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : int=None , _UpperCAmelCase : Any=False ): lowerCAmelCase = ( AutoTokenizer.from_pretrained('facebook/opt-2.7b' ) if 'opt' in model_name else AutoTokenizer.from_pretrained('google/flan-t5-xl' ) ) lowerCAmelCase = tokenizer('\n' , add_special_tokens=_UpperCAmelCase ).input_ids[0] lowerCAmelCase ,lowerCAmelCase = get_blipa_config(_UpperCAmelCase , eos_token_id=_UpperCAmelCase ) lowerCAmelCase = BlipaForConditionalGeneration(_UpperCAmelCase ).eval() lowerCAmelCase = { 'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'), 'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'), 'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'), 'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'), 'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'), 'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'), 'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'), } lowerCAmelCase ,lowerCAmelCase = model_name_to_original[model_name] # load original model print('Loading original model...' ) lowerCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu' lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = load_model_and_preprocess( name=_UpperCAmelCase , model_type=_UpperCAmelCase , is_eval=_UpperCAmelCase , device=_UpperCAmelCase ) original_model.eval() print('Done!' 
) # update state dict keys lowerCAmelCase = original_model.state_dict() lowerCAmelCase = create_rename_keys(_UpperCAmelCase ) for src, dest in rename_keys: rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): lowerCAmelCase = state_dict.pop(_UpperCAmelCase ) if key.startswith('Qformer.bert' ): lowerCAmelCase = key.replace('Qformer.bert' , 'qformer' ) if "attention.self" in key: lowerCAmelCase = key.replace('self' , 'attention' ) if "opt_proj" in key: lowerCAmelCase = key.replace('opt_proj' , 'language_projection' ) if "t5_proj" in key: lowerCAmelCase = key.replace('t5_proj' , 'language_projection' ) if key.startswith('opt' ): lowerCAmelCase = key.replace('opt' , 'language' ) if key.startswith('t5' ): lowerCAmelCase = key.replace('t5' , 'language' ) lowerCAmelCase = val # read in qv biases read_in_q_v_bias(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase ,lowerCAmelCase = hf_model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) assert len(_UpperCAmelCase ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] lowerCAmelCase = load_demo_image() lowerCAmelCase = vis_processors['eval'](_UpperCAmelCase ).unsqueeze(0 ).to(_UpperCAmelCase ) lowerCAmelCase = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(_UpperCAmelCase ) # create processor lowerCAmelCase = BlipImageProcessor( size={'height': image_size, 'width': image_size} , image_mean=_UpperCAmelCase , image_std=_UpperCAmelCase ) lowerCAmelCase = BlipaProcessor(image_processor=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) lowerCAmelCase = processor(images=_UpperCAmelCase , return_tensors='pt' ).pixel_values.to(_UpperCAmelCase ) # make sure processor creates exact same pixel values assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase ) original_model.to(_UpperCAmelCase ) hf_model.to(_UpperCAmelCase ) with torch.no_grad(): if "opt" in model_name: lowerCAmelCase = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits lowerCAmelCase = hf_model(_UpperCAmelCase , _UpperCAmelCase ).logits else: lowerCAmelCase = original_model( {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits lowerCAmelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 ) lowerCAmelCase = hf_model(_UpperCAmelCase , _UpperCAmelCase , labels=_UpperCAmelCase ).logits assert original_logits.shape == logits.shape print('First values of original logits:' , original_logits[0, :3, :3] ) print('First values of HF logits:' , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": lowerCAmelCase = torch.tensor( [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_UpperCAmelCase ) assert torch.allclose(logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": lowerCAmelCase = torch.tensor( [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_UpperCAmelCase ) else: # cast to same type lowerCAmelCase = logits.dtype assert torch.allclose(original_logits.to(_UpperCAmelCase ) , _UpperCAmelCase , atol=1e-2 ) print('Looks ok!' ) print('Generating a caption...' 
) lowerCAmelCase = '' lowerCAmelCase = tokenizer(_UpperCAmelCase , return_tensors='pt' ).input_ids.to(_UpperCAmelCase ) lowerCAmelCase = original_model.generate({'image': original_pixel_values} ) lowerCAmelCase = hf_model.generate( _UpperCAmelCase , _UpperCAmelCase , do_sample=_UpperCAmelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print('Original generation:' , _UpperCAmelCase ) lowerCAmelCase = input_ids.shape[1] lowerCAmelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_UpperCAmelCase ) lowerCAmelCase = [text.strip() for text in output_text] print('HF generation:' , _UpperCAmelCase ) if pytorch_dump_folder_path is not None: processor.save_pretrained(_UpperCAmelCase ) hf_model.save_pretrained(_UpperCAmelCase ) if push_to_hub: processor.push_to_hub(F'nielsr/{model_name}' ) hf_model.push_to_hub(F'nielsr/{model_name}' ) if __name__ == "__main__": __UpperCamelCase : Tuple = argparse.ArgumentParser() __UpperCamelCase : Dict = [ '''blip2-opt-2.7b''', '''blip2-opt-6.7b''', '''blip2-opt-2.7b-coco''', '''blip2-opt-6.7b-coco''', '''blip2-flan-t5-xl''', '''blip2-flan-t5-xl-coco''', '''blip2-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''blip2-opt-2.7b''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) __UpperCamelCase : List[Any] = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __UpperCamelCase : Any = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] ): from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ): from diffusers.utils.testing_utils import pytest_terminal_summary_main lowerCAmelCase = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(_UpperCAmelCase , id=_UpperCAmelCase )
"""simple docstring""" import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed __UpperCamelCase : Dict = '''true''' def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : Tuple=82 , _UpperCAmelCase : Tuple=16 ): set_seed(42 ) lowerCAmelCase = RegressionModel() lowerCAmelCase = deepcopy(_UpperCAmelCase ) lowerCAmelCase = RegressionDataset(length=_UpperCAmelCase ) lowerCAmelCase = DataLoader(_UpperCAmelCase , batch_size=_UpperCAmelCase ) model.to(accelerator.device ) lowerCAmelCase ,lowerCAmelCase = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase ) return model, ddp_model, dataloader def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Accelerator , _UpperCAmelCase : Dict=False ): lowerCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' ) lowerCAmelCase = load_dataset('glue' , 'mrpc' , split='validation' ) def tokenize_function(_UpperCAmelCase : Dict ): lowerCAmelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase ) return outputs with accelerator.main_process_first(): lowerCAmelCase = dataset.map( _UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , ) lowerCAmelCase = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_UpperCAmelCase : int ): if use_longest: return tokenizer.pad(_UpperCAmelCase , padding='longest' , return_tensors='pt' ) return tokenizer.pad(_UpperCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' ) return DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=16 ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] ): lowerCAmelCase = Accelerator(dispatch_batches=_UpperCAmelCase , split_batches=_UpperCAmelCase ) lowerCAmelCase = get_dataloader(_UpperCAmelCase , not dispatch_batches ) lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained( 'hf-internal-testing/mrpc-bert-base-cased' , return_dict=_UpperCAmelCase ) lowerCAmelCase ,lowerCAmelCase = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ): lowerCAmelCase = [] for batch in dataloader: lowerCAmelCase ,lowerCAmelCase = batch.values() with torch.no_grad(): lowerCAmelCase = model(_UpperCAmelCase ) lowerCAmelCase ,lowerCAmelCase = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) lowerCAmelCase ,lowerCAmelCase = [], [] for logit, targ in logits_and_targets: logits.append(_UpperCAmelCase ) targs.append(_UpperCAmelCase ) lowerCAmelCase ,lowerCAmelCase = torch.cat(_UpperCAmelCase ), torch.cat(_UpperCAmelCase ) return logits, targs def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Accelerator , _UpperCAmelCase : str=82 , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : Any=16 ): lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = get_basic_setup(_UpperCAmelCase , _UpperCAmelCase , 
_UpperCAmelCase ) lowerCAmelCase ,lowerCAmelCase = generate_predictions(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) assert ( len(_UpperCAmelCase ) == num_samples ), F'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_UpperCAmelCase )}' def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False ): lowerCAmelCase = evaluate.load('glue' , 'mrpc' ) lowerCAmelCase ,lowerCAmelCase = get_mrpc_setup(_UpperCAmelCase , _UpperCAmelCase ) # First do baseline lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = setup['no'] model.to(_UpperCAmelCase ) model.eval() for batch in dataloader: batch.to(_UpperCAmelCase ) with torch.inference_mode(): lowerCAmelCase = model(**_UpperCAmelCase ) lowerCAmelCase = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=_UpperCAmelCase , references=batch['labels'] ) lowerCAmelCase = metric.compute() # Then do distributed lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = setup['ddp'] model.eval() for batch in dataloader: with torch.inference_mode(): lowerCAmelCase = model(**_UpperCAmelCase ) lowerCAmelCase = outputs.logits.argmax(dim=-1 ) lowerCAmelCase = batch['labels'] lowerCAmelCase ,lowerCAmelCase = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=_UpperCAmelCase , references=_UpperCAmelCase ) lowerCAmelCase = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), F'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n' def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('**Testing gather_for_metrics**' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(F'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' ) test_mrpc(_UpperCAmelCase , _UpperCAmelCase ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test torch metrics**' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: lowerCAmelCase = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase ) if accelerator.is_local_main_process: print(F'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' ) test_torch_metrics(_UpperCAmelCase , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test last batch is not dropped when perfectly divisible**' ) lowerCAmelCase = Accelerator() test_torch_metrics(_UpperCAmelCase , 512 ) accelerator.state._reset_state() def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class a ( unittest.TestCase ): def __init__( self , _snake_case , _snake_case=7 , _snake_case=3 , _snake_case=18 , _snake_case=30 , _snake_case=4_00 , _snake_case=True , _snake_case=None , _snake_case=True , _snake_case=None , _snake_case=True , ): """simple docstring""" lowerCAmelCase = size if size is not None else {'shortest_edge': 20} lowerCAmelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18} lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = num_channels lowerCAmelCase = image_size lowerCAmelCase = min_resolution lowerCAmelCase = max_resolution lowerCAmelCase = do_resize lowerCAmelCase = size lowerCAmelCase = do_center_crop lowerCAmelCase = crop_size lowerCAmelCase = do_flip_channel_order def UpperCamelCase__ ( self ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class a ( a__ , unittest.TestCase ): snake_case__ = MobileViTImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = MobileViTImageProcessingTester(self ) @property def UpperCamelCase__ ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_snake_case , 'do_resize' ) ) self.assertTrue(hasattr(_snake_case , 'size' ) ) self.assertTrue(hasattr(_snake_case , 'do_center_crop' ) ) self.assertTrue(hasattr(_snake_case , 'center_crop' ) ) self.assertTrue(hasattr(_snake_case , 'do_flip_channel_order' ) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 20} ) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} ) lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , Image.Image ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values self.assertEqual( 
encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , np.ndarray ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , torch.Tensor ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
"""simple docstring""" __UpperCamelCase : Tuple = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict ): # Return True if there is node that has not iterated. lowerCAmelCase = [False] * len(_UpperCAmelCase ) lowerCAmelCase = [s] lowerCAmelCase = True while queue: lowerCAmelCase = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_UpperCAmelCase ) lowerCAmelCase = True lowerCAmelCase = u return visited[t] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ): lowerCAmelCase = [-1] * (len(_UpperCAmelCase )) lowerCAmelCase = 0 lowerCAmelCase = [] lowerCAmelCase = [i[:] for i in graph] # Record original cut, copy. while bfs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowerCAmelCase = float('Inf' ) lowerCAmelCase = sink while s != source: # Find the minimum value in select path lowerCAmelCase = min(_UpperCAmelCase , graph[parent[s]][s] ) lowerCAmelCase = parent[s] max_flow += path_flow lowerCAmelCase = sink while v != source: lowerCAmelCase = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowerCAmelCase = parent[v] for i in range(len(_UpperCAmelCase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
"""simple docstring""" import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class a ( unittest.TestCase ): def UpperCamelCase__ ( self ): """simple docstring""" debug_launcher(test_script.main ) def UpperCamelCase__ ( self ): """simple docstring""" debug_launcher(test_ops.main )
"""simple docstring""" from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class a : snake_case__ = 42 snake_case__ = None snake_case__ = None __UpperCamelCase : Tuple = namedtuple('''CoinsDistribResult''', '''moves excess''') def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : TreeNode | None ): if root is None: return 0 # Validation def count_nodes(_UpperCAmelCase : TreeNode | None ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(_UpperCAmelCase : TreeNode | None ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(_UpperCAmelCase ) != count_coins(_UpperCAmelCase ): raise ValueError('The nodes number should be same as the number of coins' ) # Main calculation def get_distrib(_UpperCAmelCase : TreeNode | None ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) lowerCAmelCase ,lowerCAmelCase = get_distrib(node.left ) lowerCAmelCase ,lowerCAmelCase = get_distrib(node.right ) lowerCAmelCase = 1 - left_distrib_excess lowerCAmelCase = 1 - right_distrib_excess lowerCAmelCase = ( left_distrib_moves + right_distrib_moves + abs(_UpperCAmelCase ) + abs(_UpperCAmelCase ) ) lowerCAmelCase = node.data - coins_to_left - coins_to_right return CoinsDistribResult(_UpperCAmelCase , _UpperCAmelCase ) return get_distrib(_UpperCAmelCase )[0] if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from __future__ import annotations from decimal import Decimal from numpy import array def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[float]] ): lowerCAmelCase = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(_UpperCAmelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix lowerCAmelCase = float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError('This matrix has no inverse.' ) # Creates a copy of the matrix with swapped positions of the elements lowerCAmelCase = [[0.0, 0.0], [0.0, 0.0]] lowerCAmelCase ,lowerCAmelCase = matrix[1][1], matrix[0][0] lowerCAmelCase ,lowerCAmelCase = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(_UpperCAmelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(_UpperCAmelCase ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule lowerCAmelCase = float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError('This matrix has no inverse.' ) # Creating cofactor matrix lowerCAmelCase = [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] lowerCAmelCase = (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) lowerCAmelCase = -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )) ) lowerCAmelCase = (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) lowerCAmelCase = -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) lowerCAmelCase = -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) lowerCAmelCase = (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) lowerCAmelCase = -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) lowerCAmelCase = array(_UpperCAmelCase ) for i in range(3 ): for j in range(3 ): lowerCAmelCase = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix lowerCAmelCase = array(_UpperCAmelCase ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(_UpperCAmelCase ) # Calculate the inverse of the matrix return [[float(d(_UpperCAmelCase ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType __UpperCamelCase : Any = logging.get_logger(__name__) __UpperCamelCase : str = { '''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''', '''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''', '''microsoft/deberta-v2-xlarge-mnli''': ( '''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json''' ), '''microsoft/deberta-v2-xxlarge-mnli''': ( '''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json''' ), } class a ( a__ ): snake_case__ = '''deberta-v2''' def __init__( self , _snake_case=12_81_00 , _snake_case=15_36 , _snake_case=24 , _snake_case=24 , _snake_case=61_44 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=0 , _snake_case=0.02 , _snake_case=1E-7 , _snake_case=False , _snake_case=-1 , _snake_case=0 , _snake_case=True , _snake_case=None , _snake_case=0 , _snake_case="gelu" , **_snake_case , ): """simple docstring""" super().__init__(**_snake_case ) lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = initializer_range lowerCAmelCase = relative_attention lowerCAmelCase = max_relative_positions lowerCAmelCase = pad_token_id lowerCAmelCase = position_biased_input # Backwards compatibility if type(_snake_case ) == str: lowerCAmelCase = [x.strip() for x in pos_att_type.lower().split('|' )] lowerCAmelCase = pos_att_type lowerCAmelCase = vocab_size lowerCAmelCase = layer_norm_eps lowerCAmelCase = kwargs.get('pooler_hidden_size' , _snake_case ) lowerCAmelCase = pooler_dropout lowerCAmelCase = pooler_hidden_act class a ( a__ ): @property def UpperCamelCase__ ( self ): """simple docstring""" if self.task == "multiple-choice": lowerCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: lowerCAmelCase = {0: 'batch', 1: 'sequence'} if self._config.type_vocab_size > 0: return OrderedDict( [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] ) else: return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] ) @property def UpperCamelCase__ ( self ): """simple docstring""" return 12 def UpperCamelCase__ ( self , _snake_case , _snake_case = -1 , _snake_case = -1 , _snake_case = -1 , _snake_case = False , _snake_case = None , _snake_case = 3 , _snake_case = 40 , _snake_case = 40 , _snake_case = None , ): """simple docstring""" lowerCAmelCase = super().generate_dummy_inputs(preprocessor=_snake_case , framework=_snake_case ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase : Dict = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys __UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import _LazyModule __UpperCamelCase : Union[str, Any] = {'''tokenization_tapex''': ['''TapexTokenizer''']} if TYPE_CHECKING: from .tokenization_tapex import TapexTokenizer else: import sys __UpperCamelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer __UpperCamelCase : Dict = logging.get_logger(__name__) __UpperCamelCase : str = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase : Optional[int] = { '''vocab_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''', '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-german-cased''': ( '''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json''' ), '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json''' ), }, } __UpperCamelCase : str = { '''distilbert-base-uncased''': 512, '''distilbert-base-uncased-distilled-squad''': 512, '''distilbert-base-cased''': 512, '''distilbert-base-cased-distilled-squad''': 512, '''distilbert-base-german-cased''': 512, '''distilbert-base-multilingual-cased''': 512, } __UpperCamelCase : Any = { '''distilbert-base-uncased''': {'''do_lower_case''': True}, '''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True}, '''distilbert-base-cased''': {'''do_lower_case''': False}, '''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False}, '''distilbert-base-german-cased''': {'''do_lower_case''': False}, '''distilbert-base-multilingual-cased''': {'''do_lower_case''': False}, } class a ( a__ ): snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_VOCAB_FILES_MAP snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = PRETRAINED_INIT_CONFIGURATION snake_case__ = ['''input_ids''', '''attention_mask'''] snake_case__ = DistilBertTokenizer def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ): """simple docstring""" super().__init__( _snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , 
tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , ) lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _snake_case ) != do_lower_case or normalizer_state.get('strip_accents' , _snake_case ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _snake_case ) != tokenize_chinese_chars ): lowerCAmelCase = getattr(_snake_case , normalizer_state.pop('type' ) ) lowerCAmelCase = do_lower_case lowerCAmelCase = strip_accents lowerCAmelCase = tokenize_chinese_chars lowerCAmelCase = normalizer_class(**_snake_case ) lowerCAmelCase = do_lower_case def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = self._tokenizer.model.save(_snake_case , name=_snake_case ) return tuple(_snake_case )
"""simple docstring""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging __UpperCamelCase : Optional[int] = logging.get_logger(__name__) class a ( a__ ): snake_case__ = ['''pixel_values'''] def __init__( self , _snake_case = True , _snake_case = 1 / 2_55 , _snake_case = True , _snake_case = 8 , **_snake_case , ): """simple docstring""" super().__init__(**_snake_case ) lowerCAmelCase = do_rescale lowerCAmelCase = rescale_factor lowerCAmelCase = do_pad lowerCAmelCase = pad_size def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case ): """simple docstring""" return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = get_image_size(_snake_case ) lowerCAmelCase = (old_height // size + 1) * size - old_height lowerCAmelCase = (old_width // size + 1) * size - old_width return pad(_snake_case , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=_snake_case ) def UpperCamelCase__ ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = ChannelDimension.FIRST , **_snake_case , ): """simple docstring""" lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase = do_pad if do_pad is not None else self.do_pad lowerCAmelCase = pad_size if pad_size is not None else self.pad_size lowerCAmelCase = make_list_of_images(_snake_case ) if not valid_images(_snake_case ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. lowerCAmelCase = [to_numpy_array(_snake_case ) for image in images] if do_rescale: lowerCAmelCase = [self.rescale(image=_snake_case , scale=_snake_case ) for image in images] if do_pad: lowerCAmelCase = [self.pad(_snake_case , size=_snake_case ) for image in images] lowerCAmelCase = [to_channel_dimension_format(_snake_case , _snake_case ) for image in images] lowerCAmelCase = {'pixel_values': images} return BatchFeature(data=_snake_case , tensor_type=_snake_case )
"""simple docstring""" from __future__ import annotations def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] | None = None ): lowerCAmelCase = word_bank or [] # create a table lowerCAmelCase = len(_UpperCAmelCase ) + 1 lowerCAmelCase = [] for _ in range(_UpperCAmelCase ): table.append([] ) # seed value lowerCAmelCase = [[]] # because empty string has empty combination # iterate through the indices for i in range(_UpperCAmelCase ): # condition if table[i] != []: for word in word_bank: # slice condition if target[i : i + len(_UpperCAmelCase )] == word: lowerCAmelCase = [ [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now,push that combination to the table[i+len(word)] table[i + len(_UpperCAmelCase )] += new_combinations # combinations are in reverse order so reverse for better output for combination in table[len(_UpperCAmelCase )]: combination.reverse() return table[len(_UpperCAmelCase )] if __name__ == "__main__": print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa'''])) print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t'''])) print( all_construct( '''hexagonosaurus''', ['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''], ) )
"""simple docstring""" import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class a ( a__ ): snake_case__ = (DPMSolverSDEScheduler,) snake_case__ = 1_0 def UpperCamelCase__ ( self , **_snake_case ): """simple docstring""" lowerCAmelCase = { 'num_train_timesteps': 11_00, 'beta_start': 0.0_001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'noise_sampler_seed': 0, } config.update(**_snake_case ) return config def UpperCamelCase__ ( self ): """simple docstring""" for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=_snake_case , beta_end=_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**_snake_case ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase = sample.to(_snake_case ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase = scheduler.scale_model_input(_snake_case , _snake_case ) lowerCAmelCase = model(_snake_case , _snake_case ) lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case ) lowerCAmelCase = output.prev_sample lowerCAmelCase = torch.sum(torch.abs(_snake_case ) ) lowerCAmelCase = torch.mean(torch.abs(_snake_case ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47_821_044_921_875 ) < 1E-2 assert abs(result_mean.item() - 0.2_178_705_964_565_277 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_352_111_816_406 ) < 1E-2 assert abs(result_mean.item() - 0.22_342_906_892_299_652 ) < 1E-3 else: assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2 assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config(prediction_type='v_prediction' ) lowerCAmelCase = scheduler_class(**_snake_case ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase = sample.to(_snake_case ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase = scheduler.scale_model_input(_snake_case , _snake_case ) lowerCAmelCase = model(_snake_case , _snake_case ) lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case ) lowerCAmelCase = output.prev_sample lowerCAmelCase = torch.sum(torch.abs(_snake_case ) ) lowerCAmelCase = torch.mean(torch.abs(_snake_case ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77_149_200_439_453 ) < 1E-2 assert abs(result_mean.item() - 0.16_226_289_014_816_284 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1_663_360_595_703 ) < 1E-2 assert 
abs(result_mean.item() - 0.16_688_326_001_167_297 ) < 1E-3 else: assert abs(result_sum.item() - 119.8_487_548_828_125 ) < 1E-2 assert abs(result_mean.item() - 0.1_560_530_662_536_621 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**_snake_case ) scheduler.set_timesteps(self.num_inference_steps , device=_snake_case ) lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter.to(_snake_case ) * scheduler.init_noise_sigma for t in scheduler.timesteps: lowerCAmelCase = scheduler.scale_model_input(_snake_case , _snake_case ) lowerCAmelCase = model(_snake_case , _snake_case ) lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case ) lowerCAmelCase = output.prev_sample lowerCAmelCase = torch.sum(torch.abs(_snake_case ) ) lowerCAmelCase = torch.mean(torch.abs(_snake_case ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46_957_397_460_938 ) < 1E-2 assert abs(result_mean.item() - 0.21_805_934_607_982_635 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_353_637_695_312 ) < 1E-2 assert abs(result_mean.item() - 0.22_342_908_382_415_771 ) < 1E-3 else: assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2 assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**_snake_case , use_karras_sigmas=_snake_case ) scheduler.set_timesteps(self.num_inference_steps , device=_snake_case ) lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter.to(_snake_case ) * scheduler.init_noise_sigma lowerCAmelCase = sample.to(_snake_case ) for t in scheduler.timesteps: lowerCAmelCase = scheduler.scale_model_input(_snake_case , _snake_case ) lowerCAmelCase = model(_snake_case , _snake_case ) lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case ) lowerCAmelCase = output.prev_sample lowerCAmelCase = torch.sum(torch.abs(_snake_case ) ) lowerCAmelCase = torch.mean(torch.abs(_snake_case ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66_974_135_742_188 ) < 1E-2 assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63_653_564_453_125 ) < 1E-2 assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2 else: assert abs(result_sum.item() - 170.3_135_223_388_672 ) < 1E-2 assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
"""simple docstring""" import re def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): if len(re.findall('[ATCG]' , _UpperCAmelCase ) ) != len(_UpperCAmelCase ): raise ValueError('Invalid Strand' ) return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from __future__ import annotations import math def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list ): if len(_UpperCAmelCase ) != 2 or len(a[0] ) != 2 or len(_UpperCAmelCase ) != 2 or len(b[0] ) != 2: raise Exception('Matrices are not 2x2' ) lowerCAmelCase = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list ): return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(_UpperCAmelCase ) ) ] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list ): return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(_UpperCAmelCase ) ) ] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list ): if len(_UpperCAmelCase ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception('Odd matrices are not supported!' ) lowerCAmelCase = len(_UpperCAmelCase ) lowerCAmelCase = matrix_length // 2 lowerCAmelCase = [[a[i][j] for j in range(_UpperCAmelCase , _UpperCAmelCase )] for i in range(_UpperCAmelCase )] lowerCAmelCase = [ [a[i][j] for j in range(_UpperCAmelCase , _UpperCAmelCase )] for i in range(_UpperCAmelCase , _UpperCAmelCase ) ] lowerCAmelCase = [[a[i][j] for j in range(_UpperCAmelCase )] for i in range(_UpperCAmelCase )] lowerCAmelCase = [[a[i][j] for j in range(_UpperCAmelCase )] for i in range(_UpperCAmelCase , _UpperCAmelCase )] return top_left, top_right, bot_left, bot_right def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list ): return len(_UpperCAmelCase ), len(matrix[0] ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list ): print('\n'.join(str(_UpperCAmelCase ) for line in matrix ) ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list ): if matrix_dimensions(_UpperCAmelCase ) == (2, 2): return default_matrix_multiplication(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = split_matrix(_UpperCAmelCase ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = split_matrix(_UpperCAmelCase ) lowerCAmelCase = actual_strassen(_UpperCAmelCase , matrix_subtraction(_UpperCAmelCase , _UpperCAmelCase ) ) lowerCAmelCase = actual_strassen(matrix_addition(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase ) lowerCAmelCase = actual_strassen(matrix_addition(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase ) lowerCAmelCase = actual_strassen(_UpperCAmelCase , matrix_subtraction(_UpperCAmelCase , _UpperCAmelCase ) ) lowerCAmelCase = actual_strassen(matrix_addition(_UpperCAmelCase , _UpperCAmelCase ) , matrix_addition(_UpperCAmelCase , _UpperCAmelCase ) ) lowerCAmelCase = actual_strassen(matrix_subtraction(_UpperCAmelCase , _UpperCAmelCase ) , matrix_addition(_UpperCAmelCase , _UpperCAmelCase ) ) lowerCAmelCase = actual_strassen(matrix_subtraction(_UpperCAmelCase , _UpperCAmelCase ) , matrix_addition(_UpperCAmelCase , _UpperCAmelCase ) ) lowerCAmelCase = matrix_addition(matrix_subtraction(matrix_addition(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase ) , _UpperCAmelCase ) lowerCAmelCase = matrix_addition(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = matrix_addition(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = matrix_subtraction(matrix_subtraction(matrix_addition(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase ) , _UpperCAmelCase ) # construct the 
new matrix from our 4 quadrants lowerCAmelCase = [] for i in range(len(_UpperCAmelCase ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(_UpperCAmelCase ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list , _UpperCAmelCase : list ): if matrix_dimensions(_UpperCAmelCase )[1] != matrix_dimensions(_UpperCAmelCase )[0]: lowerCAmelCase = ( 'Unable to multiply these matrices, please check the dimensions.\n' F'Matrix A: {matrixa}\n' F'Matrix B: {matrixa}' ) raise Exception(_UpperCAmelCase ) lowerCAmelCase = matrix_dimensions(_UpperCAmelCase ) lowerCAmelCase = matrix_dimensions(_UpperCAmelCase ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] lowerCAmelCase = max(*_UpperCAmelCase , *_UpperCAmelCase ) lowerCAmelCase = int(math.pow(2 , math.ceil(math.loga(_UpperCAmelCase ) ) ) ) lowerCAmelCase = matrixa lowerCAmelCase = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , _UpperCAmelCase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , _UpperCAmelCase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , _UpperCAmelCase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) lowerCAmelCase = actual_strassen(_UpperCAmelCase , _UpperCAmelCase ) # Removing the additional zeros for i in range(0 , _UpperCAmelCase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , _UpperCAmelCase ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": __UpperCamelCase : Optional[int] = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] __UpperCamelCase : Union[str, Any] = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]] print(strassen(matrixa, matrixa))
"""simple docstring""" import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () __UpperCamelCase : List[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). __UpperCamelCase : str = [0, 25, 50] __UpperCamelCase : int = [25, 50, 75] __UpperCamelCase : str = fuzz.membership.trimf(X, abca) __UpperCamelCase : Tuple = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. __UpperCamelCase : Dict = np.ones(75) __UpperCamelCase : str = np.zeros((75,)) # 1. Union = max(µA(x), µB(x)) __UpperCamelCase : Optional[Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) __UpperCamelCase : Dict = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) __UpperCamelCase : Dict = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) __UpperCamelCase : List[str] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] __UpperCamelCase : List[str] = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) __UpperCamelCase : Tuple = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] __UpperCamelCase : Union[str, Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] __UpperCamelCase : Dict = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title('''Young''') plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title('''Middle aged''') plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title('''union''') plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title('''intersection''') plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title('''complement_a''') plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title('''difference a/b''') plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title('''alg_sum''') plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title('''alg_product''') plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title('''bdd_sum''') plt.grid(True) plt.subplot(4, 3, 10) plt.plot(X, bdd_difference) plt.title('''bdd_difference''') plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
"""simple docstring""" import argparse import collections import json import os import re import string import sys import numpy as np __UpperCamelCase : Optional[int] = re.compile(R'''\b(a|an|the)\b''', re.UNICODE) __UpperCamelCase : Any = None def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' ) parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' ) parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' ) parser.add_argument( '--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' ) parser.add_argument( '--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' ) parser.add_argument( '--na-prob-thresh' , '-t' , type=_UpperCAmelCase , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , ) parser.add_argument( '--out-image-dir' , '-p' , metavar='out_images' , default=_UpperCAmelCase , help='Save precision-recall curves to directory.' ) parser.add_argument('--verbose' , '-v' , action='store_true' ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] ): lowerCAmelCase = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: lowerCAmelCase = bool(qa['answers']['text'] ) return qid_to_has_ans def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] ): def remove_articles(_UpperCAmelCase : Optional[Any] ): return ARTICLES_REGEX.sub(' ' , _UpperCAmelCase ) def white_space_fix(_UpperCAmelCase : Dict ): return " ".join(text.split() ) def remove_punc(_UpperCAmelCase : Optional[Any] ): lowerCAmelCase = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_UpperCAmelCase : Union[str, Any] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_UpperCAmelCase ) ) ) ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any ): if not s: return [] return normalize_answer(_UpperCAmelCase ).split() def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict , _UpperCAmelCase : int ): return int(normalize_answer(_UpperCAmelCase ) == normalize_answer(_UpperCAmelCase ) ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] ): lowerCAmelCase = get_tokens(_UpperCAmelCase ) lowerCAmelCase = get_tokens(_UpperCAmelCase ) lowerCAmelCase = collections.Counter(_UpperCAmelCase ) & collections.Counter(_UpperCAmelCase ) lowerCAmelCase = sum(common.values() ) if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 lowerCAmelCase = 1.0 * num_same / len(_UpperCAmelCase ) lowerCAmelCase = 1.0 * num_same / len(_UpperCAmelCase ) lowerCAmelCase = (2 * precision * recall) / (precision + recall) return fa def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple ): lowerCAmelCase = {} lowerCAmelCase = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: lowerCAmelCase = qa['id'] lowerCAmelCase = [t for t in qa['answers']['text'] if normalize_answer(_UpperCAmelCase )] if not gold_answers: # For unanswerable questions, only correct answer is empty string lowerCAmelCase = [''] if qid not in preds: print(F'Missing prediction for {qid}' ) continue lowerCAmelCase = preds[qid] # Take max over all gold 
answers lowerCAmelCase = max(compute_exact(_UpperCAmelCase , _UpperCAmelCase ) for a in gold_answers ) lowerCAmelCase = max(compute_fa(_UpperCAmelCase , _UpperCAmelCase ) for a in gold_answers ) return exact_scores, fa_scores def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ): lowerCAmelCase = {} for qid, s in scores.items(): lowerCAmelCase = na_probs[qid] > na_prob_thresh if pred_na: lowerCAmelCase = float(not qid_to_has_ans[qid] ) else: lowerCAmelCase = s return new_scores def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict=None ): if not qid_list: lowerCAmelCase = len(_UpperCAmelCase ) return collections.OrderedDict( [ ('exact', 100.0 * sum(exact_scores.values() ) / total), ('f1', 100.0 * sum(fa_scores.values() ) / total), ('total', total), ] ) else: lowerCAmelCase = len(_UpperCAmelCase ) return collections.OrderedDict( [ ('exact', 100.0 * sum(exact_scores[k] for k in qid_list ) / total), ('f1', 100.0 * sum(fa_scores[k] for k in qid_list ) / total), ('total', total), ] ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ): for k in new_eval: lowerCAmelCase = new_eval[k] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : int ): plt.step(_UpperCAmelCase , _UpperCAmelCase , color='b' , alpha=0.2 , where='post' ) plt.fill_between(_UpperCAmelCase , _UpperCAmelCase , step='post' , alpha=0.2 , color='b' ) plt.xlabel('Recall' ) plt.ylabel('Precision' ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(_UpperCAmelCase ) plt.savefig(_UpperCAmelCase ) plt.clf() def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str=None , _UpperCAmelCase : Optional[Any]=None ): lowerCAmelCase = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : na_probs[k] ) lowerCAmelCase = 0.0 lowerCAmelCase = 1.0 lowerCAmelCase = 0.0 lowerCAmelCase = [1.0] lowerCAmelCase = [0.0] lowerCAmelCase = 0.0 for i, qid in enumerate(_UpperCAmelCase ): if qid_to_has_ans[qid]: true_pos += scores[qid] lowerCAmelCase = true_pos / float(i + 1 ) lowerCAmelCase = true_pos / float(_UpperCAmelCase ) if i == len(_UpperCAmelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(_UpperCAmelCase ) recalls.append(_UpperCAmelCase ) if out_image: plot_pr_curve(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return {"ap": 100.0 * avg_prec} def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] ): if out_image_dir and not os.path.exists(_UpperCAmelCase ): os.makedirs(_UpperCAmelCase ) lowerCAmelCase = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return lowerCAmelCase = make_precision_recall_eval( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , ) lowerCAmelCase = make_precision_recall_eval( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_f1.png' ) , 
title='Precision-Recall curve for F1 score' , ) lowerCAmelCase = {k: float(_UpperCAmelCase ) for k, v in qid_to_has_ans.items()} lowerCAmelCase = make_precision_recall_eval( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , out_image=os.path.join(_UpperCAmelCase , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , ) merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_exact' ) merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_f1' ) merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'pr_oracle' ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ): if not qid_list: return lowerCAmelCase = [na_probs[k] for k in qid_list] lowerCAmelCase = np.ones_like(_UpperCAmelCase ) / float(len(_UpperCAmelCase ) ) plt.hist(_UpperCAmelCase , weights=_UpperCAmelCase , bins=20 , range=(0.0, 1.0) ) plt.xlabel('Model probability of no-answer' ) plt.ylabel('Proportion of dataset' ) plt.title(F'Histogram of no-answer probability: {name}' ) plt.savefig(os.path.join(_UpperCAmelCase , F'na_prob_hist_{name}.png' ) ) plt.clf() def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : List[str] ): lowerCAmelCase = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) lowerCAmelCase = num_no_ans lowerCAmelCase = cur_score lowerCAmelCase = 0.0 lowerCAmelCase = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : na_probs[k] ) for i, qid in enumerate(_UpperCAmelCase ): if qid not in scores: continue if qid_to_has_ans[qid]: lowerCAmelCase = scores[qid] else: if preds[qid]: lowerCAmelCase = -1 else: lowerCAmelCase = 0 cur_score += diff if cur_score > best_score: lowerCAmelCase = cur_score lowerCAmelCase = na_probs[qid] return 100.0 * best_score / len(_UpperCAmelCase ), best_thresh def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Any ): lowerCAmelCase ,lowerCAmelCase = find_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase ,lowerCAmelCase = find_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = best_exact lowerCAmelCase = exact_thresh lowerCAmelCase = best_fa lowerCAmelCase = fa_thresh def _SCREAMING_SNAKE_CASE (): with open(OPTS.data_file ) as f: lowerCAmelCase = json.load(_UpperCAmelCase ) lowerCAmelCase = dataset_json['data'] with open(OPTS.pred_file ) as f: lowerCAmelCase = json.load(_UpperCAmelCase ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: lowerCAmelCase = json.load(_UpperCAmelCase ) else: lowerCAmelCase = {k: 0.0 for k in preds} lowerCAmelCase = make_qid_to_has_ans(_UpperCAmelCase ) # maps qid to True/False lowerCAmelCase = [k for k, v in qid_to_has_ans.items() if v] lowerCAmelCase = [k for k, v in qid_to_has_ans.items() if not v] lowerCAmelCase ,lowerCAmelCase = get_raw_scores(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = apply_no_ans_threshold(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.na_prob_thresh ) lowerCAmelCase = apply_no_ans_threshold(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.na_prob_thresh ) lowerCAmelCase = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase ) if has_ans_qids: lowerCAmelCase = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase , qid_list=_UpperCAmelCase ) merge_eval(_UpperCAmelCase , 
_UpperCAmelCase , 'HasAns' ) if no_ans_qids: lowerCAmelCase = make_eval_dict(_UpperCAmelCase , _UpperCAmelCase , qid_list=_UpperCAmelCase ) merge_eval(_UpperCAmelCase , _UpperCAmelCase , 'NoAns' ) if OPTS.na_prob_file: find_all_best_thresh(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir ) histogram_na_prob(_UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir , 'hasAns' ) histogram_na_prob(_UpperCAmelCase , _UpperCAmelCase , OPTS.out_image_dir , 'noAns' ) if OPTS.out_file: with open(OPTS.out_file , 'w' ) as f: json.dump(_UpperCAmelCase , _UpperCAmelCase ) else: print(json.dumps(_UpperCAmelCase , indent=2 ) ) if __name__ == "__main__": __UpperCamelCase : Dict = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use('''Agg''') import matplotlib.pyplot as plt main()
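As a quick illustration (not part of the original script, and assuming the functions above are in scope), the normalization-based metrics can be called directly on strings:

# Illustrative example only.
print(compute_exact("the cat sat", "cat sat"))       # 1: articles are stripped by normalize_answer
print(round(compute_f1("the cat sat", "a cat"), 3))  # 0.667: one shared token out of 2 gold / 1 pred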
"""simple docstring""" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[int] , _UpperCAmelCase : str ): lowerCAmelCase = int(_UpperCAmelCase ) # Initialize Result lowerCAmelCase = [] # Traverse through all denomination for denomination in reversed(_UpperCAmelCase ): # Find denominations while int(_UpperCAmelCase ) >= int(_UpperCAmelCase ): total_value -= int(_UpperCAmelCase ) answer.append(_UpperCAmelCase ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": __UpperCamelCase : Any = [] __UpperCamelCase : List[Any] = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): __UpperCamelCase : Any = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(f'''Denomination {i}: ''').strip())) __UpperCamelCase : int = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter __UpperCamelCase : List[str] = [1, 2, 5, 10, 20, 50, 100, 500, 2000] __UpperCamelCase : Any = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(f'''Following is minimal change for {value}: ''') __UpperCamelCase : List[str] = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME __UpperCamelCase : int = ['''small''', '''medium''', '''large'''] __UpperCamelCase : str = '''lm_head.decoder.weight''' __UpperCamelCase : Dict = '''lm_head.weight''' def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ): lowerCAmelCase = torch.load(_UpperCAmelCase ) lowerCAmelCase = d.pop(_UpperCAmelCase ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) torch.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ) if __name__ == "__main__": __UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--dialogpt_path''', default='''.''', type=str) __UpperCamelCase : Optional[int] = parser.parse_args() for MODEL in DIALOGPT_MODELS: __UpperCamelCase : Dict = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''') __UpperCamelCase : str = f'''./DialoGPT-{MODEL}''' convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
"""simple docstring""" from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class a : def __init__( self , _snake_case , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = 13 lowerCAmelCase = 7 lowerCAmelCase = True lowerCAmelCase = True lowerCAmelCase = True lowerCAmelCase = 99 lowerCAmelCase = 32 lowerCAmelCase = 2 lowerCAmelCase = 4 lowerCAmelCase = 37 lowerCAmelCase = 'gelu' lowerCAmelCase = 0.1 lowerCAmelCase = 0.1 lowerCAmelCase = 5_12 lowerCAmelCase = 16 lowerCAmelCase = 2 lowerCAmelCase = 0.02 lowerCAmelCase = 3 lowerCAmelCase = 4 lowerCAmelCase = None def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self ): """simple docstring""" ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = self.prepare_config_and_inputs() lowerCAmelCase = True lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFEsmModel(config=_snake_case ) lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} lowerCAmelCase = model(_snake_case ) lowerCAmelCase = [input_ids, input_mask] lowerCAmelCase = model(_snake_case ) lowerCAmelCase = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" 
lowerCAmelCase = True lowerCAmelCase = TFEsmModel(config=_snake_case ) lowerCAmelCase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'encoder_hidden_states': encoder_hidden_states, 'encoder_attention_mask': encoder_attention_mask, } lowerCAmelCase = model(_snake_case ) lowerCAmelCase = [input_ids, input_mask] lowerCAmelCase = model(_snake_case , encoder_hidden_states=_snake_case ) # Also check the case where encoder outputs are not passed lowerCAmelCase = model(_snake_case , attention_mask=_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFEsmForMaskedLM(config=_snake_case ) lowerCAmelCase = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = TFEsmForTokenClassification(config=_snake_case ) lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} lowerCAmelCase = model(_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class a ( a__ , a__ , unittest.TestCase ): snake_case__ = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) snake_case__ = ( { '''feature-extraction''': TFEsmModel, '''fill-mask''': TFEsmForMaskedLM, '''text-classification''': TFEsmForSequenceClassification, '''token-classification''': TFEsmForTokenClassification, '''zero-shot''': TFEsmForSequenceClassification, } if is_tf_available() else {} ) snake_case__ = False snake_case__ = False def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFEsmModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = TFEsmModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) @unittest.skip('Protein models do 
not support embedding resizing.' ) def UpperCamelCase__ ( self ): """simple docstring""" pass @unittest.skip('Protein models do not support embedding resizing.' ) def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase = model_class(_snake_case ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer lowerCAmelCase = model.get_bias() assert isinstance(_snake_case , _snake_case ) for k, v in name.items(): assert isinstance(_snake_case , tf.Variable ) else: lowerCAmelCase = model.get_output_embeddings() assert x is None lowerCAmelCase = model.get_bias() assert name is None @require_tf class a ( unittest.TestCase ): @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCAmelCase = model(_snake_case )[0] lowerCAmelCase = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) , _snake_case ) # compare the actual values for a slice. lowerCAmelCase = tf.constant( [ [ [8.921_518, -10.589_814, -6.4_671_307], [-6.3_967_156, -13.911_377, -1.1_211_915], [-7.781_247, -13.951_557, -3.740_592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) lowerCAmelCase = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowerCAmelCase = model(_snake_case )[0] # compare the actual values for a slice. lowerCAmelCase = tf.constant( [ [ [0.14_443_092, 0.54_125_327, 0.3_247_739], [0.30_340_484, 0.00_526_676, 0.31_077_722], [0.32_278_043, -0.24_987_096, 0.3_414_628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
"""simple docstring""" from __future__ import annotations __UpperCamelCase : int = '''#''' class a : def __init__( self ): """simple docstring""" lowerCAmelCase = {} def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = self._trie for char in text: if char not in trie: lowerCAmelCase = {} lowerCAmelCase = trie[char] lowerCAmelCase = True def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = self._trie for char in prefix: if char in trie: lowerCAmelCase = trie[char] else: return [] return self._elements(_snake_case ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = [] for c, v in d.items(): lowerCAmelCase = [' '] if c == END else [(c + s) for s in self._elements(_snake_case )] result.extend(_snake_case ) return tuple(_snake_case ) __UpperCamelCase : Dict = Trie() __UpperCamelCase : List[str] = ('''depart''', '''detergent''', '''daring''', '''dog''', '''deer''', '''deal''') for word in words: trie.insert_word(word) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): lowerCAmelCase = trie.find_word(_UpperCAmelCase ) return tuple(string + word for word in suffixes ) def _SCREAMING_SNAKE_CASE (): print(autocomplete_using_trie('de' ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
"""simple docstring""" import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) __UpperCamelCase : Dict = '''▁''' __UpperCamelCase : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''} __UpperCamelCase : str = { '''sentencepiece_model_file''': '''sentencepiece.bpe.model''', '''vocab_file''': '''vocab.txt''', } __UpperCamelCase : Tuple = { '''vocab_file''': { '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''', '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''', }, '''sentencepiece_model_file''': { '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''', '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''', }, } __UpperCamelCase : Optional[Any] = { '''ernie-m-base''': 514, '''ernie-m-large''': 514, } __UpperCamelCase : str = { '''ernie-m-base''': {'''do_lower_case''': False}, '''ernie-m-large''': {'''do_lower_case''': False}, } class a ( a__ ): snake_case__ = ["input_ids"] snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_INIT_CONFIGURATION snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = PRETRAINED_VOCAB_FILES_MAP snake_case__ = RESOURCE_FILES_NAMES def __init__( self , _snake_case , _snake_case=None , _snake_case=False , _snake_case="utf8" , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case = None , **_snake_case , ): """simple docstring""" lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , vocab_file=_snake_case , encoding=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , ) lowerCAmelCase = do_lower_case lowerCAmelCase = sentencepiece_model_ckpt lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_snake_case ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: lowerCAmelCase = self.load_vocab(filepath=_snake_case ) else: lowerCAmelCase = {self.sp_model.id_to_piece(_snake_case ): id for id in range(self.sp_model.get_piece_size() )} lowerCAmelCase = {v: k for k, v in self.vocab.items()} def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if text is None: return None lowerCAmelCase = self.tokenize(_snake_case ) lowerCAmelCase ,lowerCAmelCase = '', [] for i, ch in enumerate(_snake_case ): if ch in self.SP_CHAR_MAPPING: lowerCAmelCase = self.SP_CHAR_MAPPING.get(_snake_case ) else: lowerCAmelCase = unicodedata.normalize('NFKC' , _snake_case ) if self.is_whitespace(_snake_case ): continue normalized_text += ch char_mapping.extend([i] * len(_snake_case ) ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = normalized_text, [], 0 if self.do_lower_case: lowerCAmelCase = text.lower() for token in split_tokens: if token[:1] == "▁": lowerCAmelCase = token[1:] lowerCAmelCase = text[offset:].index(_snake_case ) + offset lowerCAmelCase = start + len(_snake_case ) token_mapping.append((char_mapping[start], char_mapping[end - 
1] + 1) ) lowerCAmelCase = end return token_mapping @property def UpperCamelCase__ ( self ): """simple docstring""" return len(self.vocab ) def UpperCamelCase__ ( self ): """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self ): """simple docstring""" lowerCAmelCase = self.__dict__.copy() lowerCAmelCase = None return state def __setstate__( self , _snake_case ): """simple docstring""" lowerCAmelCase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): lowerCAmelCase = {} lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return "".join((self.SP_CHAR_MAPPING.get(_snake_case , _snake_case ) for c in text) ) def UpperCamelCase__ ( self , _snake_case , _snake_case=False , _snake_case=64 , _snake_case=0.1 ): """simple docstring""" if self.sp_model_kwargs.get('enable_sampling' ) is True: lowerCAmelCase = True if self.sp_model_kwargs.get('alpha' ) is not None: lowerCAmelCase = self.sp_model_kwargs.get('alpha' ) if self.sp_model_kwargs.get('nbest_size' ) is not None: lowerCAmelCase = self.sp_model_kwargs.get('nbest_size' ) if not enable_sampling: lowerCAmelCase = self.sp_model.EncodeAsPieces(_snake_case ) else: lowerCAmelCase = self.sp_model.SampleEncodeAsPieces(_snake_case , _snake_case , _snake_case ) lowerCAmelCase = [] for pi, piece in enumerate(_snake_case ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(_snake_case ) and pi != 0: new_pieces.append(_snake_case ) continue else: continue lowerCAmelCase = 0 for i, chunk in enumerate(_snake_case ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(_snake_case ) or self.is_punct(_snake_case ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(_snake_case ) lowerCAmelCase = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowerCAmelCase = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowerCAmelCase = i if len(_snake_case ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip() return out_string def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = self.convert_ids_to_tokens(_snake_case ) lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip() return out_string def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return self.reverse_vocab.get(_snake_case , self.unk_token ) def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] lowerCAmelCase = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + 
[(0, 0)] def UpperCamelCase__ ( self , _snake_case , _snake_case=None , _snake_case=False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1] return [1] + ([0] * len(_snake_case )) + [1] def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" if token_ids_a is None: # [CLS] X [SEP] return (len(_snake_case ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(_snake_case ) + 1) + [1] * (len(_snake_case ) + 3) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if "\u4e00" <= char <= "\u9fff": return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if char in ",;:.?!~,;:。?!《》【】": return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(_snake_case ) == 1: lowerCAmelCase = unicodedata.category(_snake_case ) if cat == "Zs": return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = {} with io.open(_snake_case , 'r' , encoding='utf-8' ) as f: for index, line in enumerate(_snake_case ): lowerCAmelCase = line.rstrip('\n' ) lowerCAmelCase = int(_snake_case ) return token_to_idx def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = 0 if os.path.isdir(_snake_case ): lowerCAmelCase = os.path.join( _snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) else: lowerCAmelCase = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(_snake_case , 'w' , encoding='utf-8' ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda _snake_case : kv[1] ): if index != token_index: logger.warning( F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.' ' Please check that the vocabulary is not corrupted!' ) lowerCAmelCase = token_index writer.write(token + '\n' ) index += 1 lowerCAmelCase = os.path.join(_snake_case , 'sentencepiece.bpe.model' ) with open(_snake_case , 'wb' ) as fi: lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(_snake_case ) return (vocab_file,)
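A hedged usage sketch (hypothetical file paths; instantiating the tokenizer needs a real SentencePiece model, so the calls are left commented out):

# tokenizer = ErnieMTokenizer(sentencepiece_model_ckpt="sentencepiece.bpe.model", vocab_file="vocab.txt")
# ids_a = tokenizer("Hello")["input_ids"]
# Pair inputs are laid out by build_inputs_with_special_tokens as: [CLS] A [SEP] [SEP] B [SEP]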
"""simple docstring""" import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch __UpperCamelCase : Optional[int] = True except ImportError: __UpperCamelCase : Any = False try: from torch.hub import _get_torch_home __UpperCamelCase : str = _get_torch_home() except ImportError: __UpperCamelCase : int = os.path.expanduser( os.getenv('''TORCH_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''torch''')) ) __UpperCamelCase : Tuple = os.path.join(torch_cache_home, '''transformers''') __UpperCamelCase : Optional[Any] = '''https://cdn.huggingface.co''' __UpperCamelCase : Dict = '''https://s3.amazonaws.com/models.huggingface.co/bert''' __UpperCamelCase : Dict = '''/'''.join(str(Path(__file__).resolve()).split('''/''')[:-1]) __UpperCamelCase : Any = os.path.join(PATH, '''config.yaml''') __UpperCamelCase : Any = os.path.join(PATH, '''attributes.txt''') __UpperCamelCase : Optional[Any] = os.path.join(PATH, '''objects.txt''') __UpperCamelCase : Optional[int] = os.getenv('''PYTORCH_PRETRAINED_BERT_CACHE''', default_cache_path) __UpperCamelCase : List[Any] = os.getenv('''PYTORCH_TRANSFORMERS_CACHE''', PYTORCH_PRETRAINED_BERT_CACHE) __UpperCamelCase : List[Any] = os.getenv('''TRANSFORMERS_CACHE''', PYTORCH_TRANSFORMERS_CACHE) __UpperCamelCase : List[Any] = '''pytorch_model.bin''' __UpperCamelCase : int = '''config.yaml''' def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any=OBJECTS , _UpperCAmelCase : Optional[int]=ATTRIBUTES ): lowerCAmelCase = [] with open(_UpperCAmelCase ) as f: for object in f.readlines(): vg_classes.append(object.split(',' )[0].lower().strip() ) lowerCAmelCase = [] with open(_UpperCAmelCase ) as f: for object in f.readlines(): vg_attrs.append(object.split(',' )[0].lower().strip() ) return vg_classes, vg_attrs def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] ): lowerCAmelCase = OrderedDict() with open(_UpperCAmelCase , 'rb' ) as f: lowerCAmelCase = pkl.load(_UpperCAmelCase )['model'] for k in copy.deepcopy(list(ckp.keys() ) ): lowerCAmelCase = ckp.pop(_UpperCAmelCase ) if isinstance(_UpperCAmelCase , np.ndarray ): lowerCAmelCase = torch.tensor(_UpperCAmelCase ) else: assert isinstance(_UpperCAmelCase , torch.tensor ), type(_UpperCAmelCase ) lowerCAmelCase = v return r class a : snake_case__ = {} def __init__( self , _snake_case , _snake_case = "root" , _snake_case=0 ): """simple docstring""" lowerCAmelCase = name lowerCAmelCase = level lowerCAmelCase = {} for k, v in dictionary.items(): if v is None: raise ValueError() lowerCAmelCase = copy.deepcopy(_snake_case ) lowerCAmelCase = copy.deepcopy(_snake_case ) if isinstance(_snake_case , _snake_case ): lowerCAmelCase = Config(_snake_case , name=_snake_case , level=level + 1 ) lowerCAmelCase = v setattr(self , _snake_case , _snake_case ) lowerCAmelCase = d def __repr__( self ): """simple docstring""" return str(list((self._pointer.keys()) ) ) def __setattr__( self , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = val lowerCAmelCase = val lowerCAmelCase = key.split('.' 
) lowerCAmelCase = len(_snake_case ) - 1 lowerCAmelCase = self._pointer if len(_snake_case ) > 1: for i, l in enumerate(_snake_case ): if hasattr(self , _snake_case ) and isinstance(getattr(self , _snake_case ) , _snake_case ): setattr(getattr(self , _snake_case ) , '.'.join(levels[i:] ) , _snake_case ) if l == last_level: lowerCAmelCase = val else: lowerCAmelCase = pointer[l] def UpperCamelCase__ ( self ): """simple docstring""" return self._pointer def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" with open(F'{file_name}' , 'w' ) as stream: dump(_snake_case , _snake_case ) def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" with open(F'{file_name}' , 'w' ) as stream: json.dump(_snake_case , _snake_case ) @staticmethod def UpperCamelCase__ ( _snake_case ): """simple docstring""" with open(_snake_case ) as stream: lowerCAmelCase = load(_snake_case , Loader=_snake_case ) return data def __str__( self ): """simple docstring""" lowerCAmelCase = ' ' if self._name != "root": lowerCAmelCase = F'{t * (self._level-1)}{self._name}:\n' else: lowerCAmelCase = '' lowerCAmelCase = self._level for i, (k, v) in enumerate(self._pointer.items() ): if isinstance(_snake_case , _snake_case ): r += F'{t * (self._level)}{v}\n' self._level += 1 else: r += F'{t * (self._level)}{k}: {v} ({type(_snake_case ).__name__})\n' lowerCAmelCase = level return r[:-1] @classmethod def UpperCamelCase__ ( cls , _snake_case , **_snake_case ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = cls.get_config_dict(_snake_case , **_snake_case ) return cls(_snake_case ) @classmethod def UpperCamelCase__ ( cls , _snake_case , **_snake_case ): """simple docstring""" lowerCAmelCase = kwargs.pop('cache_dir' , _snake_case ) lowerCAmelCase = kwargs.pop('force_download' , _snake_case ) lowerCAmelCase = kwargs.pop('resume_download' , _snake_case ) lowerCAmelCase = kwargs.pop('proxies' , _snake_case ) lowerCAmelCase = kwargs.pop('local_files_only' , _snake_case ) if os.path.isdir(_snake_case ): lowerCAmelCase = os.path.join(_snake_case , _snake_case ) elif os.path.isfile(_snake_case ) or is_remote_url(_snake_case ): lowerCAmelCase = pretrained_model_name_or_path else: lowerCAmelCase = hf_bucket_url(_snake_case , filename=_snake_case , use_cdn=_snake_case ) try: # Load from URL or cache if already cached lowerCAmelCase = cached_path( _snake_case , cache_dir=_snake_case , force_download=_snake_case , proxies=_snake_case , resume_download=_snake_case , local_files_only=_snake_case , ) # Load config dict if resolved_config_file is None: raise EnvironmentError lowerCAmelCase = Config.load_yaml(_snake_case ) except EnvironmentError: lowerCAmelCase = 'Can\'t load config for' raise EnvironmentError(_snake_case ) if resolved_config_file == config_file: print('loading configuration file from path' ) else: print('loading configuration file cache' ) return Config.load_yaml(_snake_case ), kwargs def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): lowerCAmelCase = torch.load('dump.pt' , map_location=in_tensor.device ) lowerCAmelCase = in_tensor.numpy() lowerCAmelCase = out_tensor.numpy()[0] print(na.shape , na[0, 0, :5] ) print(na.shape , na[0, 0, :5] ) assert np.allclose(_UpperCAmelCase , _UpperCAmelCase , rtol=0.01 , atol=0.1 ), ( F'{sum([1 for x in np.isclose(_UpperCAmelCase , _UpperCAmelCase , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %' " element-wise mismatch" ) raise Exception('tensors are all good' ) # Hugging face functions below def 
_SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple ): lowerCAmelCase = urlparse(_UpperCAmelCase ) return parsed.scheme in ("http", "https") def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Tuple=True ): lowerCAmelCase = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX lowerCAmelCase = '/' not in model_id if legacy_format: return F'{endpoint}/{model_id}-{filename}' else: return F'{endpoint}/{model_id}/{filename}' def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Dict=None , _UpperCAmelCase : int=0 , _UpperCAmelCase : str=None , ): lowerCAmelCase = 'python/{}'.format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): ua += "; " + "; ".join('{}/{}'.format(_UpperCAmelCase , _UpperCAmelCase ) for k, v in user_agent.items() ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): ua += "; " + user_agent lowerCAmelCase = {'user-agent': ua} if resume_size > 0: lowerCAmelCase = 'bytes=%d-' % (resume_size,) lowerCAmelCase = requests.get(_UpperCAmelCase , stream=_UpperCAmelCase , proxies=_UpperCAmelCase , headers=_UpperCAmelCase ) if response.status_code == 416: # Range not satisfiable return lowerCAmelCase = response.headers.get('Content-Length' ) lowerCAmelCase = resume_size + int(_UpperCAmelCase ) if content_length is not None else None lowerCAmelCase = tqdm( unit='B' , unit_scale=_UpperCAmelCase , total=_UpperCAmelCase , initial=_UpperCAmelCase , desc='Downloading' , ) for chunk in response.iter_content(chunk_size=1024 ): if chunk: # filter out keep-alive new chunks progress.update(len(_UpperCAmelCase ) ) temp_file.write(_UpperCAmelCase ) progress.close() def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Any=10 , _UpperCAmelCase : Dict=False , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Optional[int]=False , ): if cache_dir is None: lowerCAmelCase = TRANSFORMERS_CACHE if isinstance(_UpperCAmelCase , _UpperCAmelCase ): lowerCAmelCase = str(_UpperCAmelCase ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) lowerCAmelCase = None if not local_files_only: try: lowerCAmelCase = requests.head(_UpperCAmelCase , allow_redirects=_UpperCAmelCase , proxies=_UpperCAmelCase , timeout=_UpperCAmelCase ) if response.status_code == 200: lowerCAmelCase = response.headers.get('ETag' ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass lowerCAmelCase = url_to_filename(_UpperCAmelCase , _UpperCAmelCase ) # get cache path to put the file lowerCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(_UpperCAmelCase ): return cache_path else: lowerCAmelCase = [ file for file in fnmatch.filter(os.listdir(_UpperCAmelCase ) , filename + '.*' ) if not file.endswith('.json' ) and not file.endswith('.lock' ) ] if len(_UpperCAmelCase ) > 0: return os.path.join(_UpperCAmelCase , matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( 'Cannot find the requested files in the cached path and outgoing traffic has been' ' disabled. 
To enable model look-ups and downloads online, set \'local_files_only\'' ' to False.' ) return None # From now on, etag is not None. if os.path.exists(_UpperCAmelCase ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. lowerCAmelCase = cache_path + '.lock' with FileLock(_UpperCAmelCase ): # If the download just completed while the lock was activated. if os.path.exists(_UpperCAmelCase ) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: lowerCAmelCase = cache_path + '.incomplete' @contextmanager def _resumable_file_manager(): with open(_UpperCAmelCase , 'a+b' ) as f: yield f lowerCAmelCase = _resumable_file_manager if os.path.exists(_UpperCAmelCase ): lowerCAmelCase = os.stat(_UpperCAmelCase ).st_size else: lowerCAmelCase = 0 else: lowerCAmelCase = partial(tempfile.NamedTemporaryFile , dir=_UpperCAmelCase , delete=_UpperCAmelCase ) lowerCAmelCase = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: print( '%s not found in cache or force_download set to True, downloading to %s' , _UpperCAmelCase , temp_file.name , ) http_get( _UpperCAmelCase , _UpperCAmelCase , proxies=_UpperCAmelCase , resume_size=_UpperCAmelCase , user_agent=_UpperCAmelCase , ) os.replace(temp_file.name , _UpperCAmelCase ) lowerCAmelCase = {'url': url, 'etag': etag} lowerCAmelCase = cache_path + '.json' with open(_UpperCAmelCase , 'w' ) as meta_file: json.dump(_UpperCAmelCase , _UpperCAmelCase ) return cache_path def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=None ): lowerCAmelCase = url.encode('utf-8' ) lowerCAmelCase = shaaaa(_UpperCAmelCase ) lowerCAmelCase = url_hash.hexdigest() if etag: lowerCAmelCase = etag.encode('utf-8' ) lowerCAmelCase = shaaaa(_UpperCAmelCase ) filename += "." + etag_hash.hexdigest() if url.endswith('.h5' ): filename += ".h5" return filename def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple , _UpperCAmelCase : str=None , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : str=False , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : Optional[int]=False , ): if cache_dir is None: lowerCAmelCase = TRANSFORMERS_CACHE if isinstance(_UpperCAmelCase , _UpperCAmelCase ): lowerCAmelCase = str(_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): lowerCAmelCase = str(_UpperCAmelCase ) if is_remote_url(_UpperCAmelCase ): # URL, so get it from the cache (downloading if necessary) lowerCAmelCase = get_from_cache( _UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , proxies=_UpperCAmelCase , resume_download=_UpperCAmelCase , user_agent=_UpperCAmelCase , local_files_only=_UpperCAmelCase , ) elif os.path.exists(_UpperCAmelCase ): # File, and it exists. lowerCAmelCase = url_or_filename elif urlparse(_UpperCAmelCase ).scheme == "": # File, but it doesn't exist. raise EnvironmentError('file {} not found'.format(_UpperCAmelCase ) ) else: # Something unknown raise ValueError('unable to parse {} as a URL or as a local path'.format(_UpperCAmelCase ) ) if extract_compressed_file: if not is_zipfile(_UpperCAmelCase ) and not tarfile.is_tarfile(_UpperCAmelCase ): return output_path # Path where we extract compressed archives # We avoid '.' 
in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" lowerCAmelCase ,lowerCAmelCase = os.path.split(_UpperCAmelCase ) lowerCAmelCase = output_file.replace('.' , '-' ) + '-extracted' lowerCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if os.path.isdir(_UpperCAmelCase ) and os.listdir(_UpperCAmelCase ) and not force_extract: return output_path_extracted # Prevent parallel extractions lowerCAmelCase = output_path + '.lock' with FileLock(_UpperCAmelCase ): shutil.rmtree(_UpperCAmelCase , ignore_errors=_UpperCAmelCase ) os.makedirs(_UpperCAmelCase ) if is_zipfile(_UpperCAmelCase ): with ZipFile(_UpperCAmelCase , 'r' ) as zip_file: zip_file.extractall(_UpperCAmelCase ) zip_file.close() elif tarfile.is_tarfile(_UpperCAmelCase ): lowerCAmelCase = tarfile.open(_UpperCAmelCase ) tar_file.extractall(_UpperCAmelCase ) tar_file.close() else: raise EnvironmentError('Archive format of {} could not be identified'.format(_UpperCAmelCase ) ) return output_path_extracted return output_path def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict , _UpperCAmelCase : str="," ): assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) if os.path.isfile(_UpperCAmelCase ): with open(_UpperCAmelCase ) as f: lowerCAmelCase = eval(f.read() ) else: lowerCAmelCase = requests.get(_UpperCAmelCase ) try: lowerCAmelCase = requests.json() except Exception: lowerCAmelCase = req.content.decode() assert data is not None, "could not connect" try: lowerCAmelCase = eval(_UpperCAmelCase ) except Exception: lowerCAmelCase = data.split('\n' ) req.close() return data def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] ): lowerCAmelCase = requests.get(_UpperCAmelCase ) lowerCAmelCase = np.array(Image.open(BytesIO(response.content ) ) ) return img def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] ): lowerCAmelCase = url.split('/' )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(_UpperCAmelCase ) with open(_UpperCAmelCase , 'rb' ) as stream: lowerCAmelCase = pkl.load(_UpperCAmelCase ) lowerCAmelCase = weights.pop('model' ) lowerCAmelCase = {} for k, v in model.items(): lowerCAmelCase = torch.from_numpy(_UpperCAmelCase ) if "running_var" in k: lowerCAmelCase = torch.tensor([0] ) lowerCAmelCase = k.replace('running_var' , 'num_batches_tracked' ) lowerCAmelCase = zero return new def _SCREAMING_SNAKE_CASE (): print(F'{os.path.abspath(os.path.join(_UpperCAmelCase , os.pardir ) )}/demo.ipynb' ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : int="RGB" ): assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) if os.path.isfile(_UpperCAmelCase ): lowerCAmelCase = cva.imread(_UpperCAmelCase ) else: lowerCAmelCase = get_image_from_url(_UpperCAmelCase ) assert img is not None, F'could not connect to: {im}' lowerCAmelCase = cva.cvtColor(_UpperCAmelCase , cva.COLOR_BGR2RGB ) if input_format == "RGB": lowerCAmelCase = img[:, :, ::-1] return img def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : List[str]=1 ): return (images[i : i + batch] for i in range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase ))
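A small usage sketch for the batching helper above (illustrative only, assuming `chunk` from this module is in scope):

# `chunk` lazily yields mini-batches from a sequence.
batches = list(chunk(list(range(7)), batch=3))
print(batches)  # [[0, 1, 2], [3, 4, 5], [6]]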
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME __UpperCamelCase : int = ['''small''', '''medium''', '''large'''] __UpperCamelCase : str = '''lm_head.decoder.weight''' __UpperCamelCase : Dict = '''lm_head.weight''' def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ): lowerCAmelCase = torch.load(_UpperCAmelCase ) lowerCAmelCase = d.pop(_UpperCAmelCase ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) torch.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ) if __name__ == "__main__": __UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--dialogpt_path''', default='''.''', type=str) __UpperCamelCase : Optional[int] = parser.parse_args() for MODEL in DIALOGPT_MODELS: __UpperCamelCase : Dict = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''') __UpperCamelCase : str = f'''./DialoGPT-{MODEL}''' convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
309
1
"""simple docstring""" from __future__ import annotations import matplotlib.pyplot as plt # type: ignore import numpy # initial triangle of Koch snowflake __UpperCamelCase : Union[str, Any] = numpy.array([0, 0]) __UpperCamelCase : int = numpy.array([0.5, 0.8_66_02_54]) __UpperCamelCase : Any = numpy.array([1, 0]) __UpperCamelCase : int = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[numpy.ndarray] , _UpperCAmelCase : int ): lowerCAmelCase = initial_vectors for _ in range(_UpperCAmelCase ): lowerCAmelCase = iteration_step(_UpperCAmelCase ) return vectors def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[numpy.ndarray] ): lowerCAmelCase = [] for i, start_vector in enumerate(vectors[:-1] ): lowerCAmelCase = vectors[i + 1] new_vectors.append(_UpperCAmelCase ) lowerCAmelCase = end_vector - start_vector new_vectors.append(start_vector + difference_vector / 3 ) new_vectors.append( start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) ) new_vectors.append(start_vector + difference_vector * 2 / 3 ) new_vectors.append(vectors[-1] ) return new_vectors def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : float ): lowerCAmelCase = numpy.radians(_UpperCAmelCase ) lowerCAmelCase ,lowerCAmelCase = numpy.cos(_UpperCAmelCase ), numpy.sin(_UpperCAmelCase ) lowerCAmelCase = numpy.array(((c, -s), (s, c)) ) return numpy.dot(_UpperCAmelCase , _UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[numpy.ndarray] ): lowerCAmelCase = plt.gca() axes.set_aspect('equal' ) # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all # y-coordinates as inputs, which are constructed from the vector-list using # zip() lowerCAmelCase ,lowerCAmelCase = zip(*_UpperCAmelCase ) plt.plot(_UpperCAmelCase , _UpperCAmelCase ) plt.show() if __name__ == "__main__": import doctest doctest.testmod() __UpperCamelCase : int = iterate(INITIAL_VECTORS, 5) plot(processed_vectors)
309
"""simple docstring""" __UpperCamelCase : Dict = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} __UpperCamelCase : str = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ): lowerCAmelCase = True lowerCAmelCase = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) order.append(_UpperCAmelCase ) return order def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ): lowerCAmelCase = True lowerCAmelCase = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return component def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] ): lowerCAmelCase = len(_UpperCAmelCase ) * [False] lowerCAmelCase = {vert: [] for vert in range(len(_UpperCAmelCase ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(_UpperCAmelCase ) lowerCAmelCase = [] for i, was_visited in enumerate(_UpperCAmelCase ): if not was_visited: order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = [] lowerCAmelCase = len(_UpperCAmelCase ) * [False] for i in range(len(_UpperCAmelCase ) ): lowerCAmelCase = order[len(_UpperCAmelCase ) - i - 1] if not visited[vert]: lowerCAmelCase = find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) components_list.append(_UpperCAmelCase ) return components_list
309
1
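The Koch-snowflake snippet above hinges on a 2D rotation matrix applied to each segment third. A minimal sketch of that single step with readable names (the names rotate, theta, c, and s are mine, not the dataset's obfuscated ones):

import numpy

def rotate(vector, angle_in_degrees):
    # Standard 2D rotation: [[cos, -sin], [sin, cos]] @ vector.
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    return numpy.dot(numpy.array(((c, -s), (s, c))), vector)

print(rotate(numpy.array([1, 0]), 90))  # approximately [0. 1.]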
"""simple docstring""" import argparse import os import re __UpperCamelCase : List[str] = '''src/diffusers''' # Pattern that looks at the indentation in a line. __UpperCamelCase : int = re.compile(R'''^(\s*)\S''') # Pattern that matches `"key":" and puts `key` in group 0. __UpperCamelCase : str = re.compile(R'''^\s*"([^"]+)":''') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. __UpperCamelCase : Tuple = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''') # Pattern that matches `"key",` and puts `key` in group 0. __UpperCamelCase : int = re.compile(R'''^\s*"([^"]+)",\s*$''') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. __UpperCamelCase : Tuple = re.compile(R'''\[([^\]]+)\]''') def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict ): lowerCAmelCase = _re_indent.search(_UpperCAmelCase ) return "" if search is None else search.groups()[0] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any]="" , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : List[Any]=None ): lowerCAmelCase = 0 lowerCAmelCase = code.split('\n' ) if start_prompt is not None: while not lines[index].startswith(_UpperCAmelCase ): index += 1 lowerCAmelCase = ['\n'.join(lines[:index] )] else: lowerCAmelCase = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). lowerCAmelCase = [lines[index]] index += 1 while index < len(_UpperCAmelCase ) and (end_prompt is None or not lines[index].startswith(_UpperCAmelCase )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(_UpperCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ): current_block.append(lines[index] ) blocks.append('\n'.join(_UpperCAmelCase ) ) if index < len(_UpperCAmelCase ) - 1: lowerCAmelCase = [lines[index + 1]] index += 1 else: lowerCAmelCase = [] else: blocks.append('\n'.join(_UpperCAmelCase ) ) lowerCAmelCase = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(_UpperCAmelCase ) > 0: blocks.append('\n'.join(_UpperCAmelCase ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(_UpperCAmelCase ): blocks.append('\n'.join(lines[index:] ) ) return blocks def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] ): def _inner(_UpperCAmelCase : Dict ): return key(_UpperCAmelCase ).lower().replace('_' , '' ) return _inner def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any]=None ): # If no key is provided, we use a noop. def noop(_UpperCAmelCase : List[str] ): return x if key is None: lowerCAmelCase = noop # Constants are all uppercase, they go first. lowerCAmelCase = [obj for obj in objects if key(_UpperCAmelCase ).isupper()] # Classes are not all uppercase but start with a capital, they go second. lowerCAmelCase = [obj for obj in objects if key(_UpperCAmelCase )[0].isupper() and not key(_UpperCAmelCase ).isupper()] # Functions begin with a lowercase, they go last. lowerCAmelCase = [obj for obj in objects if not key(_UpperCAmelCase )[0].isupper()] lowerCAmelCase = ignore_underscore(_UpperCAmelCase ) return sorted(_UpperCAmelCase , key=_UpperCAmelCase ) + sorted(_UpperCAmelCase , key=_UpperCAmelCase ) + sorted(_UpperCAmelCase , key=_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any ): # This inner function sort imports between [ ]. 
def _replace(_UpperCAmelCase : List[str] ): lowerCAmelCase = match.groups()[0] if "," not in imports: return F'[{imports}]' lowerCAmelCase = [part.strip().replace('"' , '' ) for part in imports.split(',' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: lowerCAmelCase = keys[:-1] return "[" + ", ".join([F'"{k}"' for k in sort_objects(_UpperCAmelCase )] ) + "]" lowerCAmelCase = import_statement.split('\n' ) if len(_UpperCAmelCase ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. lowerCAmelCase = 2 if lines[1].strip() == '[' else 1 lowerCAmelCase = [(i, _re_strip_line.search(_UpperCAmelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] lowerCAmelCase = sort_objects(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] ) lowerCAmelCase = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(_UpperCAmelCase ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: lowerCAmelCase = _re_bracket_content.sub(_replace , lines[1] ) else: lowerCAmelCase = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: lowerCAmelCase = keys[:-1] lowerCAmelCase = get_indent(lines[1] ) + ', '.join([F'"{k}"' for k in sort_objects(_UpperCAmelCase )] ) return "\n".join(_UpperCAmelCase ) else: # Finally we have to deal with imports fitting on one line lowerCAmelCase = _re_bracket_content.sub(_replace , _UpperCAmelCase ) return import_statement def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=True ): with open(_UpperCAmelCase , 'r' ) as f: lowerCAmelCase = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 lowerCAmelCase = split_code_in_indented_blocks( _UpperCAmelCase , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_UpperCAmelCase ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. lowerCAmelCase = main_blocks[block_idx] lowerCAmelCase = block.split('\n' ) # Get to the start of the imports. lowerCAmelCase = 0 while line_idx < len(_UpperCAmelCase ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: lowerCAmelCase = len(_UpperCAmelCase ) else: line_idx += 1 if line_idx >= len(_UpperCAmelCase ): continue # Ignore beginning and last line: they don't contain anything. lowerCAmelCase = '\n'.join(block_lines[line_idx:-1] ) lowerCAmelCase = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. lowerCAmelCase = split_code_in_indented_blocks(_UpperCAmelCase , indent_level=_UpperCAmelCase ) # We have two categories of import key: list or _import_structure[key].append/extend lowerCAmelCase = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. 
lowerCAmelCase = [(pattern.search(_UpperCAmelCase ).groups()[0] if pattern.search(_UpperCAmelCase ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. lowerCAmelCase = [(i, key) for i, key in enumerate(_UpperCAmelCase ) if key is not None] lowerCAmelCase = [x[0] for x in sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. lowerCAmelCase = 0 lowerCAmelCase = [] for i in range(len(_UpperCAmelCase ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: lowerCAmelCase = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(_UpperCAmelCase ) count += 1 # And we put our main block back together with its first and last line. lowerCAmelCase = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(_UpperCAmelCase ): if check_only: return True else: print(F'Overwriting {file}.' ) with open(_UpperCAmelCase , 'w' ) as f: f.write('\n'.join(_UpperCAmelCase ) ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict=True ): lowerCAmelCase = [] for root, _, files in os.walk(_UpperCAmelCase ): if "__init__.py" in files: lowerCAmelCase = sort_imports(os.path.join(_UpperCAmelCase , '__init__.py' ) , check_only=_UpperCAmelCase ) if result: lowerCAmelCase = [os.path.join(_UpperCAmelCase , '__init__.py' )] if len(_UpperCAmelCase ) > 0: raise ValueError(F'Would overwrite {len(_UpperCAmelCase )} files, run `make style`.' ) if __name__ == "__main__": __UpperCamelCase : str = argparse.ArgumentParser() parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''') __UpperCamelCase : Dict = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
309
"""simple docstring""" import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) @dataclass class a : snake_case__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} ) snake_case__ = field( metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} ) snake_case__ = field( default=1_2_8 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) snake_case__ = field( default=a__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.task_name.lower() class a ( a__ ): snake_case__ = '''train''' snake_case__ = '''dev''' snake_case__ = '''test''' class a ( a__ ): snake_case__ = 42 snake_case__ = 42 snake_case__ = 42 def __init__( self , _snake_case , _snake_case , _snake_case = None , _snake_case = Split.train , _snake_case = None , ): """simple docstring""" warnings.warn( 'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets ' 'library. You can have a look at this example script for pointers: ' 'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , _snake_case , ) lowerCAmelCase = args lowerCAmelCase = glue_processors[args.task_name]() lowerCAmelCase = glue_output_modes[args.task_name] if isinstance(_snake_case , _snake_case ): try: lowerCAmelCase = Split[mode] except KeyError: raise KeyError('mode is not a valid split name' ) # Load data features from cache or dataset file lowerCAmelCase = os.path.join( cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , ) lowerCAmelCase = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) lowerCAmelCase ,lowerCAmelCase = label_list[2], label_list[1] lowerCAmelCase = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
lowerCAmelCase = cached_features_file + '.lock' with FileLock(_snake_case ): if os.path.exists(_snake_case ) and not args.overwrite_cache: lowerCAmelCase = time.time() lowerCAmelCase = torch.load(_snake_case ) logger.info( F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start ) else: logger.info(F'Creating features from dataset file at {args.data_dir}' ) if mode == Split.dev: lowerCAmelCase = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: lowerCAmelCase = self.processor.get_test_examples(args.data_dir ) else: lowerCAmelCase = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: lowerCAmelCase = examples[:limit_length] lowerCAmelCase = glue_convert_examples_to_features( _snake_case , _snake_case , max_length=args.max_seq_length , label_list=_snake_case , output_mode=self.output_mode , ) lowerCAmelCase = time.time() torch.save(self.features , _snake_case ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' ) def __len__( self ): """simple docstring""" return len(self.features ) def __getitem__( self , _snake_case ): """simple docstring""" return self.features[i] def UpperCamelCase__ ( self ): """simple docstring""" return self.label_list
309
1
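The import sorter in the code column above orders names in three buckets: ALL_CAPS constants first, CapitalizedCase classes second, lowercase functions last, each bucket sorted case-insensitively with underscores ignored. A minimal standalone sketch of just that ordering rule (function and variable names here are mine, not the script's):

def sort_names(names):
    # Case-insensitive key that ignores underscores, like ignore_underscore above.
    key = lambda n: n.lower().replace("_", "")
    constants = sorted((n for n in names if n.isupper()), key=key)
    classes = sorted((n for n in names if n[0].isupper() and not n.isupper()), key=key)
    functions = sorted((n for n in names if not n[0].isupper()), key=key)
    return constants + classes + functions

print(sort_names(["load_tool", "BaseOutput", "ONNX_WEIGHTS_NAME", "deprecate"]))
# ['ONNX_WEIGHTS_NAME', 'BaseOutput', 'deprecate', 'load_tool']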
"""simple docstring""" import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class a ( a__ ): def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_snake_case , 'tf_padding' ) ) self.parent.assertTrue(hasattr(_snake_case , 'depth_multiplier' ) ) class a : def __init__( self , _snake_case , _snake_case=13 , _snake_case=3 , _snake_case=32 , _snake_case=0.25 , _snake_case=8 , _snake_case=8 , _snake_case=6 , _snake_case=32 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case="relu6" , _snake_case=12_80 , _snake_case=0.1 , _snake_case=0.02 , _snake_case=True , _snake_case=True , _snake_case=10 , _snake_case=None , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = num_channels lowerCAmelCase = image_size lowerCAmelCase = depth_multiplier lowerCAmelCase = depth_divisible_by lowerCAmelCase = min_depth lowerCAmelCase = expand_ratio lowerCAmelCase = tf_padding lowerCAmelCase = output_stride lowerCAmelCase = first_layer_is_expansion lowerCAmelCase = finegrained_output lowerCAmelCase = hidden_act lowerCAmelCase = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier ) lowerCAmelCase = classifier_dropout_prob lowerCAmelCase = use_labels lowerCAmelCase = is_training lowerCAmelCase = num_labels lowerCAmelCase = initializer_range lowerCAmelCase = scope def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCAmelCase = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCamelCase__ ( self ): """simple docstring""" return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = MobileNetVaModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, 
self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) self.parent.assertEqual( result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = MobileNetVaForImageClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = MobileNetVaForSemanticSegmentation(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) lowerCAmelCase = model(_snake_case , labels=_snake_case ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = config_and_inputs lowerCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class a ( a__ , a__ , unittest.TestCase ): snake_case__ = ( (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation) if is_torch_available() else () ) snake_case__ = ( { '''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification, '''image-segmentation''': MobileNetVaForSemanticSegmentation, } if is_torch_available() else {} ) snake_case__ = False snake_case__ = False snake_case__ = False snake_case__ = False def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = MobileNetVaModelTester(self ) lowerCAmelCase = MobileNetVaConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='MobileNetV2 does not use inputs_embeds' ) def UpperCamelCase__ ( self ): """simple docstring""" pass @unittest.skip(reason='MobileNetV2 does not support input and output embeddings' ) def UpperCamelCase__ ( self ): """simple docstring""" pass @unittest.skip(reason='MobileNetV2 does not output attentions' ) def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase = model_class(_snake_case ) lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase = [*signature.parameters.keys()] lowerCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , _snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" def check_hidden_states_output(_snake_case , _snake_case , _snake_case ): lowerCAmelCase = 
model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): lowerCAmelCase = model(**self._prepare_for_class(_snake_case , _snake_case ) ) lowerCAmelCase = outputs.hidden_states lowerCAmelCase = 16 self.assertEqual(len(_snake_case ) , _snake_case ) lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase = True check_hidden_states_output(_snake_case , _snake_case , _snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase = True check_hidden_states_output(_snake_case , _snake_case , _snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = MobileNetVaModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class a ( unittest.TestCase ): @cached_property def UpperCamelCase__ ( self ): """simple docstring""" return ( MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(_snake_case ) lowerCAmelCase = self.default_image_processor lowerCAmelCase = prepare_img() lowerCAmelCase = image_processor(images=_snake_case , return_tensors='pt' ).to(_snake_case ) # forward pass with torch.no_grad(): lowerCAmelCase = model(**_snake_case ) # verify the logits lowerCAmelCase = torch.Size((1, 10_01) ) self.assertEqual(outputs.logits.shape , _snake_case ) lowerCAmelCase = torch.tensor([0.2_445, -1.1_993, 0.1_905] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' ) lowerCAmelCase = model.to(_snake_case ) lowerCAmelCase = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' ) lowerCAmelCase = prepare_img() lowerCAmelCase = image_processor(images=_snake_case , return_tensors='pt' ).to(_snake_case ) # forward pass with torch.no_grad(): lowerCAmelCase = model(**_snake_case ) lowerCAmelCase = outputs.logits # verify the logits lowerCAmelCase = torch.Size((1, 21, 65, 65) ) self.assertEqual(logits.shape , _snake_case ) lowerCAmelCase = torch.tensor( [ [[17.5_790, 17.7_581, 18.3_355], [18.3_257, 18.4_230, 18.8_973], [18.6_169, 18.8_650, 19.2_187]], [[-2.1_595, -2.0_977, -2.3_741], [-2.4_226, -2.3_028, -2.6_835], [-2.7_819, -2.5_991, -2.7_706]], [[4.2_058, 4.8_317, 4.7_638], [4.4_136, 5.0_361, 4.9_383], [4.5_028, 4.9_644, 4.8_734]], ] , device=_snake_case , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _snake_case , atol=1E-4 ) )
309
"""simple docstring""" import os from collections.abc import Iterator def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ): for dir_path, dir_names, filenames in os.walk(_UpperCAmelCase ): lowerCAmelCase = [d for d in dir_names if d != 'scripts' and d[0] not in '._'] for filename in filenames: if filename == "__init__.py": continue if os.path.splitext(_UpperCAmelCase )[1] in (".py", ".ipynb"): yield os.path.join(_UpperCAmelCase , _UpperCAmelCase ).lstrip('./' ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ): return F'{i * " "}*' if i else "\n##" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ): lowerCAmelCase = old_path.split(os.sep ) for i, new_part in enumerate(new_path.split(os.sep ) ): if (i + 1 > len(_UpperCAmelCase ) or old_parts[i] != new_part) and new_part: print(F'{md_prefix(_UpperCAmelCase )} {new_part.replace("_" , " " ).title()}' ) return new_path def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ): lowerCAmelCase = '' for filepath in sorted(good_file_paths(_UpperCAmelCase ) ): lowerCAmelCase ,lowerCAmelCase = os.path.split(_UpperCAmelCase ) if filepath != old_path: lowerCAmelCase = print_path(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = (filepath.count(os.sep ) + 1) if filepath else 0 lowerCAmelCase = F'{filepath}/{filename}'.replace(' ' , '%20' ) lowerCAmelCase = os.path.splitext(filename.replace('_' , ' ' ).title() )[0] print(F'{md_prefix(_UpperCAmelCase )} [{filename}]({url})' ) if __name__ == "__main__": print_directory_md('''.''')
309
1
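The directory-listing script in the style column above turns path depth into markdown structure: depth 0 becomes a "##" section header, deeper levels become indented bullets. A tiny sketch of that prefix rule, assuming two spaces of indentation per level (the upstream script's spacing; the copy in this row shows a single space):

def md_prefix(i):
    # Depth 0 becomes a section header, deeper levels become indented bullets.
    return f"{i * '  '}*" if i else "\n##"

print(repr(md_prefix(0)))  # '\n##'
print(repr(md_prefix(2)))  # '    *'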
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __UpperCamelCase : Any = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] ): from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ): from diffusers.utils.testing_utils import pytest_terminal_summary_main lowerCAmelCase = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(_UpperCAmelCase , id=_UpperCAmelCase )
309
"""simple docstring""" import os from datetime import datetime as dt from github import Github __UpperCamelCase : int = [ '''good first issue''', '''good second issue''', '''good difficult issue''', '''enhancement''', '''new pipeline/model''', '''new scheduler''', '''wip''', ] def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = Github(os.environ['GITHUB_TOKEN'] ) lowerCAmelCase = g.get_repo('huggingface/diffusers' ) lowerCAmelCase = repo.get_issues(state='open' ) for issue in open_issues: lowerCAmelCase = sorted(issue.get_comments() , key=lambda _UpperCAmelCase : i.created_at , reverse=_UpperCAmelCase ) lowerCAmelCase = comments[0] if len(_UpperCAmelCase ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state='closed' ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state='open' ) issue.remove_from_labels('stale' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( 'This issue has been automatically marked as stale because it has not had ' 'recent activity. If you think this still needs to be addressed ' 'please comment on this thread.\n\nPlease note that issues that do not follow the ' '[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) ' 'are likely to be ignored.' ) issue.add_to_labels('stale' ) if __name__ == "__main__": main()
309
1
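The stale-bot logic above turns entirely on day counts between utcnow() and the issue's timestamps. A tiny sketch of that arithmetic with made-up timestamps standing in for the PyGithub issue object:

from datetime import datetime, timedelta

updated_at = datetime.utcnow() - timedelta(days=25)
created_at = datetime.utcnow() - timedelta(days=40)

inactive_days = (datetime.utcnow() - updated_at).days  # 25
age_days = (datetime.utcnow() - created_at).days       # 40
should_warn = inactive_days > 23 and age_days >= 30    # True -> post the stale notice
print(inactive_days, age_days, should_warn)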
"""simple docstring""" import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) __UpperCamelCase : Tuple = logging.getLogger() def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('-f' ) lowerCAmelCase = parser.parse_args() return args.f class a ( a__ ): def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = logging.StreamHandler(sys.stdout ) logger.addHandler(_snake_case ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , 'run_glue_deebert.py' ) with patch.object(_snake_case , 'argv' , _snake_case ): lowerCAmelCase = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(_snake_case , 0.666 ) @slow @require_torch_non_multi_gpu def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split() self.run_and_check(_snake_case ) lowerCAmelCase = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split() self.run_and_check(_snake_case ) lowerCAmelCase = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split() self.run_and_check(_snake_case )
309
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __UpperCamelCase : Any = { '''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''], '''processing_layoutlmv2''': ['''LayoutLMv2Processor'''], '''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = ['''LayoutLMv2TokenizerFast'''] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[int] = ['''LayoutLMv2FeatureExtractor'''] __UpperCamelCase : Optional[int] = ['''LayoutLMv2ImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Any = [ '''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LayoutLMv2ForQuestionAnswering''', '''LayoutLMv2ForSequenceClassification''', '''LayoutLMv2ForTokenClassification''', '''LayoutLMv2Layer''', '''LayoutLMv2Model''', '''LayoutLMv2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
309
1
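The DeeBERT test runner in the code column above drives a training script by temporarily patching sys.argv, a generally useful testing trick. A minimal sketch of just that mechanism; main() here is a stand-in for the real run_glue_deebert.main, and the argument strings are illustrative:

import sys
from unittest.mock import patch

def main():
    # Stand-in for the real script entry point; it just echoes its CLI args.
    return sys.argv[1:]

testargs = ["prog_name", "--task_name", "MRPC", "--do_eval"]
with patch.object(sys, "argv", testargs):
    print(main())  # ['--task_name', 'MRPC', '--do_eval']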
"""simple docstring""" from __future__ import annotations import numpy as np def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[float] ): return np.maximum(0 , _UpperCAmelCase ) if __name__ == "__main__": print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
309
"""simple docstring""" import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) class a ( a__ ): def __init__( self , *_snake_case , **_snake_case ): """simple docstring""" warnings.warn( 'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use PoolFormerImageProcessor instead.' , _snake_case , ) super().__init__(*_snake_case , **_snake_case )
309
1
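The ReLU row above is a one-liner over numpy; a quick self-contained usage check of the same idea:

import numpy as np

def relu(vector):
    # Element-wise max(0, x), which is exactly what np.maximum(0, ...) computes above.
    return np.maximum(0, vector)

print(relu(np.array([-1.5, 0.0, 5.0])))  # [0. 0. 5.]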
"""simple docstring""" import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : int=None , _UpperCAmelCase : Any=None , _UpperCAmelCase : Dict=None , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Any=None , ): if attention_mask is None: lowerCAmelCase = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: lowerCAmelCase = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: lowerCAmelCase = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=_UpperCAmelCase ) if decoder_head_mask is None: lowerCAmelCase = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_UpperCAmelCase ) if cross_attn_head_mask is None: lowerCAmelCase = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_UpperCAmelCase ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class a : def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=False , _snake_case=99 , _snake_case=16 , _snake_case=2 , _snake_case=4 , _snake_case=4 , _snake_case="relu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=20 , _snake_case=2 , _snake_case=1 , _snake_case=0 , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = encoder_layerdrop lowerCAmelCase = decoder_layerdrop lowerCAmelCase = max_position_embeddings lowerCAmelCase = eos_token_id lowerCAmelCase = pad_token_id lowerCAmelCase = bos_token_id def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = self.eos_token_id # Eos Token lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # 
position_ids being off by num_pad_tokens in past input lowerCAmelCase = input_ids.clamp(self.pad_token_id + 1 ) lowerCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 ) lowerCAmelCase = self.get_config() lowerCAmelCase = prepare_mam_aaa_inputs_dict(_snake_case , _snake_case , _snake_case ) return config, inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" return MaMaaaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.prepare_config_and_inputs() return config, inputs_dict def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = MaMaaaModel(config=_snake_case ).get_decoder().to(_snake_case ).eval() lowerCAmelCase = inputs_dict['input_ids'] lowerCAmelCase = inputs_dict['attention_mask'] lowerCAmelCase = inputs_dict['head_mask'] # first forward pass lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , head_mask=_snake_case , use_cache=_snake_case ) lowerCAmelCase ,lowerCAmelCase = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )['last_hidden_state'] lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , past_key_values=_snake_case )[ 'last_hidden_state' ] # select random slice lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-2 ) ) def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = MaMaaaModel(config=_snake_case ).to(_snake_case ).eval() lowerCAmelCase = model(**_snake_case ) lowerCAmelCase = outputs.encoder_last_hidden_state lowerCAmelCase = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase = model.get_encoder() encoder.save_pretrained(_snake_case ) lowerCAmelCase = MaMaaaEncoder.from_pretrained(_snake_case ).to(_snake_case ) lowerCAmelCase = encoder(inputs_dict['input_ids'] , attention_mask=inputs_dict['attention_mask'] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase = model.get_decoder() decoder.save_pretrained(_snake_case ) lowerCAmelCase = 
MaMaaaDecoder.from_pretrained(_snake_case ).to(_snake_case ) lowerCAmelCase = decoder( input_ids=inputs_dict['decoder_input_ids'] , attention_mask=inputs_dict['decoder_attention_mask'] , encoder_hidden_states=_snake_case , encoder_attention_mask=inputs_dict['attention_mask'] , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class a ( a__ , a__ , a__ , unittest.TestCase ): snake_case__ = ( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) snake_case__ = (MaMaaaForConditionalGeneration,) if is_torch_available() else () snake_case__ = ( { '''conversational''': MaMaaaForConditionalGeneration, '''feature-extraction''': MaMaaaModel, '''summarization''': MaMaaaForConditionalGeneration, '''text2text-generation''': MaMaaaForConditionalGeneration, '''translation''': MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) snake_case__ = True snake_case__ = True snake_case__ = False snake_case__ = False def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. return True return False def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = MaMaaaModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: lowerCAmelCase = model_class(_snake_case ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_snake_case ) lowerCAmelCase ,lowerCAmelCase = model_class.from_pretrained(_snake_case , output_loading_info=_snake_case ) self.assertEqual(info['missing_keys'] , [] ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): lowerCAmelCase = model_class(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = copy.deepcopy(self._prepare_for_class(_snake_case , _snake_case ) ) if not self.is_encoder_decoder: lowerCAmelCase = inputs['input_ids'] del inputs["input_ids"] else: lowerCAmelCase = inputs['input_ids'] lowerCAmelCase = inputs.get('decoder_input_ids' , _snake_case ) del inputs["input_ids"] inputs.pop('decoder_input_ids' , _snake_case ) lowerCAmelCase = model.get_input_embeddings() if not self.is_encoder_decoder: lowerCAmelCase = wte(_snake_case ) else: lowerCAmelCase = wte(_snake_case ) lowerCAmelCase = wte(_snake_case ) with torch.no_grad(): model(**_snake_case )[0] def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs() lowerCAmelCase = 
input_dict['input_ids'] lowerCAmelCase = input_ids.ne(1 ).to(_snake_case ) lowerCAmelCase = MaMaaaForConditionalGeneration(_snake_case ).eval().to(_snake_case ) if torch_device == "cuda": model.half() model.generate(_snake_case , attention_mask=_snake_case ) model.generate(num_beams=4 , do_sample=_snake_case , early_stopping=_snake_case , num_return_sequences=3 ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): return torch.tensor(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase ) __UpperCamelCase : List[Any] = 1e-4 @require_torch @require_sentencepiece @require_tokenizers @slow class a ( unittest.TestCase ): @cached_property def UpperCamelCase__ ( self ): """simple docstring""" return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(_snake_case ) lowerCAmelCase = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] ) lowerCAmelCase = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] ) lowerCAmelCase = prepare_mam_aaa_inputs_dict(model.config , _snake_case , _snake_case ) with torch.no_grad(): lowerCAmelCase = model(**_snake_case )[0] lowerCAmelCase = torch.Size((1, 11, 10_24) ) self.assertEqual(output.shape , _snake_case ) # change to expected output here lowerCAmelCase = torch.tensor( [[-0.7_780, -0.1_676, 0.1_038], [-6.7_556, -1.3_992, 0.0_567], [-7.5_383, -0.5_920, -0.2_779]] , device=_snake_case ) self.assertTrue(torch.allclose(output[:, :3, :3] , _snake_case , atol=_snake_case ) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(_snake_case ) # change to intended input lowerCAmelCase = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] ) lowerCAmelCase = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] ) lowerCAmelCase = prepare_mam_aaa_inputs_dict(model.config , _snake_case , _snake_case ) with torch.no_grad(): lowerCAmelCase = model(**_snake_case )[0] lowerCAmelCase = torch.Size((1, 11, model.config.vocab_size) ) self.assertEqual(output.shape , _snake_case ) # change to expected output here lowerCAmelCase = torch.tensor( [[-1.0_448, -1.0_411, 3.7_992], [-3.2_191, -3.2_386, -1.3_451], [-3.6_210, -3.5_993, 0.4_925]] , device=_snake_case ) self.assertTrue(torch.allclose(output[:, :3, :3] , _snake_case , atol=_snake_case ) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(_snake_case ) lowerCAmelCase = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' , src_lang='fr' , tgt_lang='en' ) lowerCAmelCase = [ 'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement', 'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.', 'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent' ' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de' ' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.', ] # The below article tests that we don't add any hypotheses outside of the top n_beams lowerCAmelCase = tokenizer(_snake_case , padding=_snake_case , return_tensors='pt' ) lowerCAmelCase = model.generate( input_ids=dct['input_ids'].to(_snake_case 
) , attention_mask=dct['attention_mask'].to(_snake_case ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('en' ) , ) lowerCAmelCase = [ 'The NSA case highlights the total absence of intelligence debate', 'I think there are two levels of response from the French government.', 'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.' ' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all' ' communications in France.', ] lowerCAmelCase = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=_snake_case , skip_special_tokens=_snake_case ) assert generated == expected_en
309
"""simple docstring""" from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. __UpperCamelCase : str = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. __UpperCamelCase : Optional[Any] = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. __UpperCamelCase : Dict = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ): lowerCAmelCase = len([g for position, g in enumerate(_UpperCAmelCase ) if g == main_target[position]] ) return (item, float(_UpperCAmelCase )) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ): lowerCAmelCase = random.randint(0 , len(_UpperCAmelCase ) - 1 ) lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:] lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] ): lowerCAmelCase = list(_UpperCAmelCase ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: lowerCAmelCase = random.choice(_UpperCAmelCase ) return "".join(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : tuple[str, float] , _UpperCAmelCase : list[tuple[str, float]] , _UpperCAmelCase : list[str] , ): lowerCAmelCase = [] # Generate more children proportionally to the fitness score. lowerCAmelCase = int(parent_a[1] * 100 ) + 1 lowerCAmelCase = 10 if child_n >= 10 else child_n for _ in range(_UpperCAmelCase ): lowerCAmelCase = population_score[random.randint(0 , _UpperCAmelCase )][0] lowerCAmelCase ,lowerCAmelCase = crossover(parent_a[0] , _UpperCAmelCase ) # Append new string to the population list. pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) ) pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) ) return pop def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] , _UpperCAmelCase : bool = True ): # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: lowerCAmelCase = F'{N_POPULATION} must be bigger than {N_SELECTED}' raise ValueError(_UpperCAmelCase ) # Verify that the target contains no genes besides the ones inside genes variable. lowerCAmelCase = sorted({c for c in target if c not in genes} ) if not_in_genes_list: lowerCAmelCase = F'{not_in_genes_list} is not in genes list, evolution cannot converge' raise ValueError(_UpperCAmelCase ) # Generate random starting population. lowerCAmelCase = [] for _ in range(_UpperCAmelCase ): population.append(''.join([random.choice(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) )] ) ) # Just some logs to know what the algorithms is doing. lowerCAmelCase ,lowerCAmelCase = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(_UpperCAmelCase ) # Random population created. Now it's time to evaluate. 
# Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. lowerCAmelCase = [evaluate(_UpperCAmelCase , _UpperCAmelCase ) for item in population] # Check if there is a matching evolution. lowerCAmelCase = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] , reverse=_UpperCAmelCase ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F'\nGeneration: {generation}' F'\nTotal Population:{total_population}' F'\nBest score: {population_score[0][1]}' F'\nBest string: {population_score[0][0]}' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. lowerCAmelCase = population[: int(N_POPULATION / 3 )] population.clear() population.extend(_UpperCAmelCase ) # Normalize population score to be between 0 and 1. lowerCAmelCase = [ (item, score / len(_UpperCAmelCase )) for item, score in population_score ] # This is selection for i in range(_UpperCAmelCase ): population.extend(select(population_score[int(_UpperCAmelCase )] , _UpperCAmelCase , _UpperCAmelCase ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(_UpperCAmelCase ) > N_POPULATION: break if __name__ == "__main__": __UpperCamelCase : Tuple = ( '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!''' ) __UpperCamelCase : str = list( ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm''' '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\''' ) __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase : Dict = basic(target_str, genes_list) print( f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
309
1
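The genetic-algorithm snippet in the style column above rests on two operators: single-point crossover and single-character mutation. A self-contained sketch of both; the names, the gene pool, and the seed are illustrative, not the snippet's:

import random

random.seed(42)
GENES = list("abcdefghijklmnopqrstuvwxyz ")

def crossover(parent_a, parent_b):
    # One cut point; the two children swap tails.
    cut = random.randint(0, len(parent_a) - 1)
    return parent_a[:cut] + parent_b[cut:], parent_b[:cut] + parent_a[cut:]

def mutate(child, mutation_probability=0.4):
    # With some probability, replace one random character with a random gene.
    chars = list(child)
    if random.uniform(0, 1) < mutation_probability:
        chars[random.randint(0, len(chars) - 1)] = random.choice(GENES)
    return "".join(chars)

print(mutate(crossover("hello world", "jolly thing")[0]))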
"""simple docstring""" import warnings from contextlib import contextmanager from ....processing_utils import ProcessorMixin class a ( a__ ): snake_case__ = '''MCTCTFeatureExtractor''' snake_case__ = '''AutoTokenizer''' def __init__( self , _snake_case , _snake_case ): """simple docstring""" super().__init__(_snake_case , _snake_case ) lowerCAmelCase = self.feature_extractor lowerCAmelCase = False def __call__( self , *_snake_case , **_snake_case ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*_snake_case , **_snake_case ) if "raw_speech" in kwargs: warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' ) lowerCAmelCase = kwargs.pop('raw_speech' ) else: lowerCAmelCase = kwargs.pop('audio' , _snake_case ) lowerCAmelCase = kwargs.pop('sampling_rate' , _snake_case ) lowerCAmelCase = kwargs.pop('text' , _snake_case ) if len(_snake_case ) > 0: lowerCAmelCase = args[0] lowerCAmelCase = args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.' ) if audio is not None: lowerCAmelCase = self.feature_extractor(_snake_case , *_snake_case , sampling_rate=_snake_case , **_snake_case ) if text is not None: lowerCAmelCase = self.tokenizer(_snake_case , **_snake_case ) if text is None: return inputs elif audio is None: return encodings else: lowerCAmelCase = encodings['input_ids'] return inputs def UpperCamelCase__ ( self , *_snake_case , **_snake_case ): """simple docstring""" return self.tokenizer.batch_decode(*_snake_case , **_snake_case ) def UpperCamelCase__ ( self , *_snake_case , **_snake_case ): """simple docstring""" if self._in_target_context_manager: return self.current_processor.pad(*_snake_case , **_snake_case ) lowerCAmelCase = kwargs.pop('input_features' , _snake_case ) lowerCAmelCase = kwargs.pop('labels' , _snake_case ) if len(_snake_case ) > 0: lowerCAmelCase = args[0] lowerCAmelCase = args[1:] if input_features is not None: lowerCAmelCase = self.feature_extractor.pad(_snake_case , *_snake_case , **_snake_case ) if labels is not None: lowerCAmelCase = self.tokenizer.pad(_snake_case , **_snake_case ) if labels is None: return input_features elif input_features is None: return labels else: lowerCAmelCase = labels['input_ids'] return input_features def UpperCamelCase__ ( self , *_snake_case , **_snake_case ): """simple docstring""" return self.tokenizer.decode(*_snake_case , **_snake_case ) @contextmanager def UpperCamelCase__ ( self ): """simple docstring""" warnings.warn( '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ' 'labels by using the argument `text` of the regular `__call__` method (either in the same call as ' 'your audio inputs, or in a separate call.' ) lowerCAmelCase = True lowerCAmelCase = self.tokenizer yield lowerCAmelCase = self.feature_extractor lowerCAmelCase = False
309
"""simple docstring""" import copy import os import cva import numpy as np from matplotlib import pyplot as plt class a : def __init__( self ): """simple docstring""" lowerCAmelCase = '' lowerCAmelCase = '' lowerCAmelCase = [] lowerCAmelCase = 0 lowerCAmelCase = 2_56 lowerCAmelCase = 0 lowerCAmelCase = 0 lowerCAmelCase = 0 lowerCAmelCase = 0 def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = cva.imread(_snake_case , 0 ) lowerCAmelCase = copy.deepcopy(self.img ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='x' ) lowerCAmelCase = np.sum(_snake_case ) for i in range(len(_snake_case ) ): lowerCAmelCase = x[i] / self.k self.sk += prk lowerCAmelCase = (self.L - 1) * self.sk if self.rem != 0: lowerCAmelCase = int(last % last ) lowerCAmelCase = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(_snake_case ) lowerCAmelCase = int(np.ma.count(self.img ) / self.img[1].size ) lowerCAmelCase = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): lowerCAmelCase = self.img[j][i] if num != self.last_list[num]: lowerCAmelCase = self.last_list[num] cva.imwrite('output_data/output.jpg' , self.img ) def UpperCamelCase__ ( self ): """simple docstring""" plt.hist(self.img.ravel() , 2_56 , [0, 2_56] ) def UpperCamelCase__ ( self ): """simple docstring""" cva.imshow('Output-Image' , self.img ) cva.imshow('Input-Image' , self.original_image ) cva.waitKey(50_00 ) cva.destroyAllWindows() if __name__ == "__main__": __UpperCamelCase : int = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''') __UpperCamelCase : List[Any] = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
309
1
"""simple docstring""" from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any ): try: with open(_UpperCAmelCase , 'rb' ) as flax_state_f: lowerCAmelCase = from_bytes(_UpperCAmelCase , flax_state_f.read() ) except UnpicklingError as e: try: with open(_UpperCAmelCase ) as f: if f.read().startswith('version' ): raise OSError( 'You seem to have cloned a repository without having git-lfs installed. Please' ' install git-lfs and run `git lfs install` followed by `git lfs pull` in the' ' folder you cloned.' ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(F'Unable to convert {model_file} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(_UpperCAmelCase , _UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : List[Any] ): try: import torch # noqa: F401 except ImportError: logger.error( 'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see' ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation' ' instructions.' ) raise # check if we have bf16 weights lowerCAmelCase = flatten_dict(jax.tree_util.tree_map(lambda _UpperCAmelCase : x.dtype == jnp.bfloataa , _UpperCAmelCase ) ).values() if any(_UpperCAmelCase ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( 'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ' 'before loading those in PyTorch model.' ) lowerCAmelCase = jax.tree_util.tree_map( lambda _UpperCAmelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _UpperCAmelCase ) lowerCAmelCase = '' lowerCAmelCase = flatten_dict(_UpperCAmelCase , sep='.' ) lowerCAmelCase = pt_model.state_dict() # keep track of unexpected & missing keys lowerCAmelCase = [] lowerCAmelCase = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowerCAmelCase = flax_key_tuple.split('.' ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: lowerCAmelCase = flax_key_tuple_array[:-1] + ['weight'] lowerCAmelCase = jnp.transpose(_UpperCAmelCase , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": lowerCAmelCase = flax_key_tuple_array[:-1] + ['weight'] lowerCAmelCase = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": lowerCAmelCase = flax_key_tuple_array[:-1] + ['weight'] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(_UpperCAmelCase ): lowerCAmelCase = ( flax_key_tuple_string.replace('_0' , '.0' ) .replace('_1' , '.1' ) .replace('_2' , '.2' ) .replace('_3' , '.3' ) .replace('_4' , '.4' ) .replace('_5' , '.5' ) .replace('_6' , '.6' ) .replace('_7' , '.7' ) .replace('_8' , '.8' ) .replace('_9' , '.9' ) ) lowerCAmelCase = '.'.join(_UpperCAmelCase ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) else: # add weight to pytorch dict lowerCAmelCase = np.asarray(_UpperCAmelCase ) if not isinstance(_UpperCAmelCase , np.ndarray ) else flax_tensor lowerCAmelCase = torch.from_numpy(_UpperCAmelCase ) # remove from missing keys missing_keys.remove(_UpperCAmelCase ) else: # weight is not expected by PyTorch model unexpected_keys.append(_UpperCAmelCase ) pt_model.load_state_dict(_UpperCAmelCase ) # re-transform missing_keys to list lowerCAmelCase = list(_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: logger.warning( 'Some weights of the Flax model were not used when initializing the PyTorch model' F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This' F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a' ' FlaxBertForSequenceClassification model).' ) if len(_UpperCAmelCase ) > 0: logger.warning( F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ' use it for predictions and inference.' ) return pt_model
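# The chained .replace('_0', '.0') ... .replace('_9', '.9') calls above turn
# Flax-style layer names such as "layers_0" into PyTorch-style "layers.0".
# A roughly equivalent regular-expression one-liner (an alternative sketch,
# not the library's code; note the original also skips keys containing
# "time_embedding", so a drop-in swap would need that guard too):
import re

flax_key = 'encoder.layers_3.self_attn.out_proj.weight'
pt_key = re.sub(r'_(\d)', r'.\1', flax_key)  # every "_<digit>" becomes ".<digit>"
print(pt_key)  # encoder.layers.3.self_attn.out_proj.weight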
309
"""simple docstring""" import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( 'split_dict' , [ SplitDict(), SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ), SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ), SplitDict({'train': SplitInfo()} ), ] , ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : SplitDict ): lowerCAmelCase = split_dict._to_yaml_list() assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ) lowerCAmelCase = SplitDict._from_yaml_list(_UpperCAmelCase ) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump lowerCAmelCase = None # the split name of split_dict takes over the name of the split info object lowerCAmelCase = split_name assert split_dict == reloaded @pytest.mark.parametrize( 'split_info' , [SplitInfo(), SplitInfo(dataset_name=_UpperCAmelCase ), SplitInfo(dataset_name='my_dataset' )] ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] ): # For backward compatibility, we need asdict(split_dict) to return split info dictrionaries with the "dataset_name" # field even if it's deprecated. This way old versionso of `datasets` can still reload dataset_infos.json files lowerCAmelCase = asdict(SplitDict({'train': split_info} ) ) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
309
1
"""simple docstring""" # Algorithm for the pigeonhole sorting def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple ): lowerCAmelCase = min(_UpperCAmelCase ) # min() finds the minimum value lowerCAmelCase = max(_UpperCAmelCase ) # max() finds the maximum value lowerCAmelCase = max_val - min_val + 1 # size is difference of max and min values plus one # list of pigeonholes of size equal to the variable size lowerCAmelCase = [0] * size # Populate the pigeonholes. for x in a: assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "integers only please" holes[x - min_val] += 1 # Putting the elements back into the array in an order. lowerCAmelCase = 0 for count in range(_UpperCAmelCase ): while holes[count] > 0: holes[count] -= 1 lowerCAmelCase = count + min_val i += 1 def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = [8, 3, 2, 7, 4, 6, 8] pigeonhole_sort(_UpperCAmelCase ) print('Sorted order is:' , ' '.join(_UpperCAmelCase ) ) if __name__ == "__main__": main()
309
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __UpperCamelCase : Any = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] ): from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ): from diffusers.utils.testing_utils import pytest_terminal_summary_main lowerCAmelCase = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(_UpperCAmelCase , id=_UpperCAmelCase )
309
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCamelCase : Union[str, Any] = { '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''], '''tokenization_mvp''': ['''MvpTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Any = ['''MvpTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : List[Any] = [ '''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MvpForCausalLM''', '''MvpForConditionalGeneration''', '''MvpForQuestionAnswering''', '''MvpForSequenceClassification''', '''MvpModel''', '''MvpPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys __UpperCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
309
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class a ( unittest.TestCase ): def __init__( self , _snake_case , _snake_case=7 , _snake_case=3 , _snake_case=18 , _snake_case=30 , _snake_case=4_00 , _snake_case=True , _snake_case=None , _snake_case=True , _snake_case=None , _snake_case=True , ): """simple docstring""" lowerCAmelCase = size if size is not None else {'shortest_edge': 20} lowerCAmelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18} lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = num_channels lowerCAmelCase = image_size lowerCAmelCase = min_resolution lowerCAmelCase = max_resolution lowerCAmelCase = do_resize lowerCAmelCase = size lowerCAmelCase = do_center_crop lowerCAmelCase = crop_size lowerCAmelCase = do_flip_channel_order def UpperCamelCase__ ( self ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class a ( a__ , unittest.TestCase ): snake_case__ = MobileViTImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = MobileViTImageProcessingTester(self ) @property def UpperCamelCase__ ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_snake_case , 'do_resize' ) ) self.assertTrue(hasattr(_snake_case , 'size' ) ) self.assertTrue(hasattr(_snake_case , 'do_center_crop' ) ) self.assertTrue(hasattr(_snake_case , 'center_crop' ) ) self.assertTrue(hasattr(_snake_case , 'do_flip_channel_order' ) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 20} ) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} ) lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , Image.Image ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values self.assertEqual( 
encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , np.ndarray ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , torch.Tensor ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
309
1
"""simple docstring""" import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class a : def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=False , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_input_mask lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self ): """simple docstring""" return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = BioGptModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = 
model(_snake_case , attention_mask=_snake_case ) lowerCAmelCase = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" lowerCAmelCase = BioGptForCausalLM(config=_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = BioGptModel(config=_snake_case ) model.to(_snake_case ) model.eval() # create attention mask lowerCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=_snake_case ) lowerCAmelCase = self.seq_length // 2 lowerCAmelCase = 0 # first forward pass lowerCAmelCase ,lowerCAmelCase = model(_snake_case , attention_mask=_snake_case ).to_tuple() # create hypothetical next token and extent to next_input_ids lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids lowerCAmelCase = ids_tensor((1,) , _snake_case ).item() + 1 lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) lowerCAmelCase = random_other_next_tokens # append to next input_ids and attn_mask lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_snake_case )] , dim=1 , ) # get two different outputs lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )['last_hidden_state'] lowerCAmelCase = model(_snake_case , past_key_values=_snake_case , attention_mask=_snake_case )['last_hidden_state'] # select random slice lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach() lowerCAmelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-3 ) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = BioGptModel(config=_snake_case ).to(_snake_case ).eval() lowerCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=_snake_case ) # first forward pass lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , use_cache=_snake_case ) lowerCAmelCase ,lowerCAmelCase = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )['last_hidden_state'] lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , past_key_values=_snake_case )[ 'last_hidden_state' ] # select random slice lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase = output_from_no_past[:, -3:, 
random_slice_idx].detach() lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-3 ) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case , _snake_case=False ): """simple docstring""" lowerCAmelCase = BioGptForCausalLM(_snake_case ) model.to(_snake_case ) if gradient_checkpointing: model.gradient_checkpointing_enable() lowerCAmelCase = model(_snake_case , labels=_snake_case ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCamelCase__ ( self , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = BioGptModel(_snake_case ) lowerCAmelCase = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = BioGptForTokenClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class a ( a__ , a__ , a__ , unittest.TestCase ): snake_case__ = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) snake_case__ = (BioGptForCausalLM,) if is_torch_available() else () snake_case__ = ( { '''feature-extraction''': BioGptModel, '''text-classification''': BioGptForSequenceClassification, '''text-generation''': BioGptForCausalLM, '''token-classification''': BioGptForTokenClassification, '''zero-shot''': BioGptForSequenceClassification, } if is_torch_available() else {} ) snake_case__ = False def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = BioGptModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase = type self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*_snake_case , gradient_checkpointing=_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*_snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) model.to(_snake_case ) lowerCAmelCase = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) lowerCAmelCase = 'left' # Define PAD Token = EOS Token = 50256 lowerCAmelCase = tokenizer.eos_token lowerCAmelCase = model.config.eos_token_id # use different length sentences to test batching lowerCAmelCase = [ 'Hello, my dog is a little', 'Today, I', ] lowerCAmelCase = tokenizer(_snake_case , return_tensors='pt' , padding=_snake_case ) lowerCAmelCase = inputs['input_ids'].to(_snake_case ) lowerCAmelCase = model.generate( input_ids=_snake_case , attention_mask=inputs['attention_mask'].to(_snake_case ) , ) lowerCAmelCase = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(_snake_case ) lowerCAmelCase = model.generate(input_ids=_snake_case ) lowerCAmelCase = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item() lowerCAmelCase = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(_snake_case ) lowerCAmelCase = model.generate(input_ids=_snake_case , max_length=model.config.max_length - num_paddings ) lowerCAmelCase = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case ) lowerCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_snake_case ) lowerCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=_snake_case ) lowerCAmelCase = [ 'Hello, my dog is a little bit bigger than a little bit.', 'Today, I have a good idea of how to use the information', ] self.assertListEqual(_snake_case , _snake_case ) self.assertListEqual(_snake_case , [non_padded_sentence, padded_sentence] ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = BioGptModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = 3 lowerCAmelCase = input_dict['input_ids'] lowerCAmelCase = input_ids.ne(1 ).to(_snake_case ) lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase = BioGptForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase 
,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = 3 lowerCAmelCase = 'multi_label_classification' lowerCAmelCase = input_dict['input_ids'] lowerCAmelCase = input_ids.ne(1 ).to(_snake_case ) lowerCAmelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) lowerCAmelCase = BioGptForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class a ( unittest.TestCase ): @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) lowerCAmelCase = torch.tensor([[2, 48_05, 9, 6_56, 21]] ) lowerCAmelCase = model(_snake_case )[0] lowerCAmelCase = 4_23_84 lowerCAmelCase = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , _snake_case ) lowerCAmelCase = torch.tensor( [[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _snake_case , atol=1E-4 ) ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) lowerCAmelCase = BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) model.to(_snake_case ) torch.manual_seed(0 ) lowerCAmelCase = tokenizer('COVID-19 is' , return_tensors='pt' ).to(_snake_case ) lowerCAmelCase = model.generate( **_snake_case , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=_snake_case , ) lowerCAmelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=_snake_case ) lowerCAmelCase = ( 'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the' ' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and' ' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),' ' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and' ' more than 800,000 deaths.' ) self.assertEqual(_snake_case , _snake_case )
309
"""simple docstring""" import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class a ( unittest.TestCase ): def UpperCamelCase__ ( self ): """simple docstring""" debug_launcher(test_script.main ) def UpperCamelCase__ ( self ): """simple docstring""" debug_launcher(test_ops.main )
309
1
"""simple docstring""" from __future__ import annotations # This is the precision for this function which can be altered. # It is recommended for users to keep this number greater than or equal to 10. __UpperCamelCase : int = 10 def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : list[int] , _UpperCAmelCase : int ): for i in range(_UpperCAmelCase , _UpperCAmelCase ): if array[i] == target: return i return -1 def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[int] , _UpperCAmelCase : int ): lowerCAmelCase = 0 lowerCAmelCase = len(_UpperCAmelCase ) while left <= right: if right - left < precision: return lin_search(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = (left + right) // 3 + 1 lowerCAmelCase = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: lowerCAmelCase = one_third - 1 elif array[two_third] < target: lowerCAmelCase = two_third + 1 else: lowerCAmelCase = one_third + 1 lowerCAmelCase = two_third - 1 else: return -1 def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : list[int] , _UpperCAmelCase : int ): if left < right: if right - left < precision: return lin_search(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = (left + right) // 3 + 1 lowerCAmelCase = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: return rec_ternary_search(_UpperCAmelCase , one_third - 1 , _UpperCAmelCase , _UpperCAmelCase ) elif array[two_third] < target: return rec_ternary_search(two_third + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) else: return rec_ternary_search(one_third + 1 , two_third - 1 , _UpperCAmelCase , _UpperCAmelCase ) else: return -1 if __name__ == "__main__": import doctest doctest.testmod() __UpperCamelCase : Optional[Any] = input('''Enter numbers separated by comma:\n''').strip() __UpperCamelCase : Any = [int(item.strip()) for item in user_input.split(''',''')] assert collection == sorted(collection), f"List must be ordered.\n{collection}." __UpperCamelCase : List[Any] = int(input('''Enter the number to be found in the list:\n''').strip()) __UpperCamelCase : Any = ite_ternary_search(collection, target) __UpperCamelCase : List[Any] = rec_ternary_search(0, len(collection) - 1, collection, target) if resulta != -1: print(f'''Iterative search: {target} found at positions: {resulta}''') print(f'''Recursive search: {target} found at positions: {resulta}''') else: print('''Not found''')
309
"""simple docstring""" from __future__ import annotations from decimal import Decimal from numpy import array def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[float]] ): lowerCAmelCase = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(_UpperCAmelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix lowerCAmelCase = float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError('This matrix has no inverse.' ) # Creates a copy of the matrix with swapped positions of the elements lowerCAmelCase = [[0.0, 0.0], [0.0, 0.0]] lowerCAmelCase ,lowerCAmelCase = matrix[1][1], matrix[0][0] lowerCAmelCase ,lowerCAmelCase = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(_UpperCAmelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(_UpperCAmelCase ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule lowerCAmelCase = float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError('This matrix has no inverse.' ) # Creating cofactor matrix lowerCAmelCase = [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] lowerCAmelCase = (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) lowerCAmelCase = -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )) ) lowerCAmelCase = (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) lowerCAmelCase = -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) lowerCAmelCase = -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) lowerCAmelCase = (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) lowerCAmelCase = -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) lowerCAmelCase = array(_UpperCAmelCase ) for i in range(3 ): for j in range(3 ): lowerCAmelCase = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix lowerCAmelCase = array(_UpperCAmelCase ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(_UpperCAmelCase ) # Calculate the inverse of the matrix return [[float(d(_UpperCAmelCase ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
309
1
"""simple docstring""" from __future__ import annotations from PIL import Image # Define glider example __UpperCamelCase : Union[str, Any] = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example __UpperCamelCase : List[Any] = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[int]] ): lowerCAmelCase = [] for i in range(len(_UpperCAmelCase ) ): lowerCAmelCase = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours lowerCAmelCase = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(_UpperCAmelCase ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(_UpperCAmelCase ) - 1: neighbour_count += cells[i + 1][j] if i < len(_UpperCAmelCase ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. lowerCAmelCase = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(_UpperCAmelCase ) return next_generation def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[int]] , _UpperCAmelCase : int ): lowerCAmelCase = [] for _ in range(_UpperCAmelCase ): # Create output image lowerCAmelCase = Image.new('RGB' , (len(cells[0] ), len(_UpperCAmelCase )) ) lowerCAmelCase = img.load() # Save cells to image for x in range(len(_UpperCAmelCase ) ): for y in range(len(cells[0] ) ): lowerCAmelCase = 255 - cells[y][x] * 255 lowerCAmelCase = (colour, colour, colour) # Save image images.append(_UpperCAmelCase ) lowerCAmelCase = new_generation(_UpperCAmelCase ) return images if __name__ == "__main__": __UpperCamelCase : List[Any] = generate_images(GLIDER, 16) images[0].save('''out.gif''', save_all=True, append_images=images[1:])
309
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase : Dict = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys __UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
309
1
"""simple docstring""" import argparse import math import traceback import dateutil.parser as date_parser import requests def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[Any] ): lowerCAmelCase = {} lowerCAmelCase = job['started_at'] lowerCAmelCase = job['completed_at'] lowerCAmelCase = date_parser.parse(_UpperCAmelCase ) lowerCAmelCase = date_parser.parse(_UpperCAmelCase ) lowerCAmelCase = round((end_datetime - start_datetime).total_seconds() / 60.0 ) lowerCAmelCase = start lowerCAmelCase = end lowerCAmelCase = duration_in_min return job_info def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any]=None ): lowerCAmelCase = None if token is not None: lowerCAmelCase = {'Accept': 'application/vnd.github+json', 'Authorization': F'Bearer {token}'} lowerCAmelCase = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100' lowerCAmelCase = requests.get(_UpperCAmelCase , headers=_UpperCAmelCase ).json() lowerCAmelCase = {} try: job_time.update({job['name']: extract_time_from_single_job(_UpperCAmelCase ) for job in result['jobs']} ) lowerCAmelCase = math.ceil((result['total_count'] - 100) / 100 ) for i in range(_UpperCAmelCase ): lowerCAmelCase = requests.get(url + F'&page={i + 2}' , headers=_UpperCAmelCase ).json() job_time.update({job['name']: extract_time_from_single_job(_UpperCAmelCase ) for job in result['jobs']} ) return job_time except Exception: print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' ) return {} if __name__ == "__main__": __UpperCamelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''') __UpperCamelCase : Union[str, Any] = parser.parse_args() __UpperCamelCase : Any = get_job_time(args.workflow_run_id) __UpperCamelCase : List[str] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(f'''{k}: {v['duration']}''')
309
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer __UpperCamelCase : Dict = logging.get_logger(__name__) __UpperCamelCase : str = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase : Optional[int] = { '''vocab_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''', '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-german-cased''': ( '''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json''' ), '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json''' ), }, } __UpperCamelCase : str = { '''distilbert-base-uncased''': 512, '''distilbert-base-uncased-distilled-squad''': 512, '''distilbert-base-cased''': 512, '''distilbert-base-cased-distilled-squad''': 512, '''distilbert-base-german-cased''': 512, '''distilbert-base-multilingual-cased''': 512, } __UpperCamelCase : Any = { '''distilbert-base-uncased''': {'''do_lower_case''': True}, '''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True}, '''distilbert-base-cased''': {'''do_lower_case''': False}, '''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False}, '''distilbert-base-german-cased''': {'''do_lower_case''': False}, '''distilbert-base-multilingual-cased''': {'''do_lower_case''': False}, } class a ( a__ ): snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_VOCAB_FILES_MAP snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = PRETRAINED_INIT_CONFIGURATION snake_case__ = ['''input_ids''', '''attention_mask'''] snake_case__ = DistilBertTokenizer def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ): """simple docstring""" super().__init__( _snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , 
tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , ) lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _snake_case ) != do_lower_case or normalizer_state.get('strip_accents' , _snake_case ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _snake_case ) != tokenize_chinese_chars ): lowerCAmelCase = getattr(_snake_case , normalizer_state.pop('type' ) ) lowerCAmelCase = do_lower_case lowerCAmelCase = strip_accents lowerCAmelCase = tokenize_chinese_chars lowerCAmelCase = normalizer_class(**_snake_case ) lowerCAmelCase = do_lower_case def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = self._tokenizer.model.save(_snake_case , name=_snake_case ) return tuple(_snake_case )
309
1
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class a ( a__ , a__ , unittest.TestCase ): snake_case__ = IFInpaintingPipeline snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS snake_case__ = PipelineTesterMixin.required_optional_params - {'''latents'''} def UpperCamelCase__ ( self ): """simple docstring""" return self._get_dummy_components() def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ): """simple docstring""" if str(_snake_case ).startswith('mps' ): lowerCAmelCase = torch.manual_seed(_snake_case ) else: lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case ) lowerCAmelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def UpperCamelCase__ ( self ): """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def UpperCamelCase__ ( self ): """simple docstring""" self._test_save_load_local() def UpperCamelCase__ ( self ): """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
309
"""simple docstring""" from __future__ import annotations def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] | None = None ): lowerCAmelCase = word_bank or [] # create a table lowerCAmelCase = len(_UpperCAmelCase ) + 1 lowerCAmelCase = [] for _ in range(_UpperCAmelCase ): table.append([] ) # seed value lowerCAmelCase = [[]] # because empty string has empty combination # iterate through the indices for i in range(_UpperCAmelCase ): # condition if table[i] != []: for word in word_bank: # slice condition if target[i : i + len(_UpperCAmelCase )] == word: lowerCAmelCase = [ [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now,push that combination to the table[i+len(word)] table[i + len(_UpperCAmelCase )] += new_combinations # combinations are in reverse order so reverse for better output for combination in table[len(_UpperCAmelCase )]: combination.reverse() return table[len(_UpperCAmelCase )] if __name__ == "__main__": print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa'''])) print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t'''])) print( all_construct( '''hexagonosaurus''', ['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''], ) )
309
1
"""simple docstring""" import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class a : def __init__( self , _snake_case , _snake_case=14 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_input_mask lowerCAmelCase = use_labels lowerCAmelCase = use_mc_token_ids lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope lowerCAmelCase = self.vocab_size - 1 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase = None if self.use_mc_token_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = self.get_config() lowerCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def UpperCamelCase__ ( self ): """simple docstring""" return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = CTRLModel(config=_snake_case ) model.to(_snake_case ) model.eval() model(_snake_case , token_type_ids=_snake_case , head_mask=_snake_case ) model(_snake_case , token_type_ids=_snake_case ) 
lowerCAmelCase = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = CTRLLMHeadModel(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask} return config, inputs_dict def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , *_snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = CTRLForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = model(_snake_case , token_type_ids=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class a ( a__ , a__ , a__ , unittest.TestCase ): snake_case__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () snake_case__ = (CTRLLMHeadModel,) if is_torch_available() else () snake_case__ = ( { '''feature-extraction''': CTRLModel, '''text-classification''': CTRLForSequenceClassification, '''text-generation''': CTRLLMHeadModel, '''zero-shot''': CTRLForSequenceClassification, } if is_torch_available() else {} ) snake_case__ = True snake_case__ = False snake_case__ = False def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. return True return False def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = CTRLModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case , n_embd=37 ) def UpperCamelCase__ ( self ): """simple docstring""" super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_snake_case ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def UpperCamelCase__ ( self ): """simple docstring""" pass @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = CTRLModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) @unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :) def UpperCamelCase__ ( self ): """simple docstring""" pass @require_torch class a ( unittest.TestCase ): def UpperCamelCase__ ( self ): """simple docstring""" super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = CTRLLMHeadModel.from_pretrained('ctrl' ) model.to(_snake_case ) lowerCAmelCase = torch.tensor( [[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=_snake_case ) # Legal the president is lowerCAmelCase = [ 1_18_59, 0, 16_11, 8, 5, 1_50, 2_64_49, 2, 19, 3_48, 4_69, 3, 25_95, 48, 2_07_40, 24_65_33, 24_65_33, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a lowerCAmelCase = model.generate(_snake_case , do_sample=_snake_case ) self.assertListEqual(output_ids[0].tolist() , _snake_case )
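# ---------------------------------------------------------------------------
# A minimal sketch of what the `ids_tensor` / `random_attention_mask` helpers
# used by the tester above do. This is an illustration only, not the exact
# `transformers` test-utility implementation, which may differ in details.
# ---------------------------------------------------------------------------
import torch


def ids_tensor(shape, vocab_size):
    # Uniform random token ids in [0, vocab_size), shaped like real input_ids.
    return torch.randint(low=0, high=vocab_size, size=tuple(shape), dtype=torch.long)


def random_attention_mask(shape):
    # Random 0/1 mask; force at least one attended position per row so an
    # all-masked row cannot occur.
    mask = torch.randint(low=0, high=2, size=tuple(shape), dtype=torch.long)
    mask[:, 0] = 1
    return mask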
309
"""simple docstring""" import re def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): if len(re.findall('[ATCG]' , _UpperCAmelCase ) ) != len(_UpperCAmelCase ): raise ValueError('Invalid Strand' ) return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) ) if __name__ == "__main__": import doctest doctest.testmod()
309
1
"""simple docstring""" import functools def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[int] , _UpperCAmelCase : list[int] ): # Validation if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not all(isinstance(_UpperCAmelCase , _UpperCAmelCase ) for day in days ): raise ValueError('The parameter days should be a list of integers' ) if len(_UpperCAmelCase ) != 3 or not all(isinstance(_UpperCAmelCase , _UpperCAmelCase ) for cost in costs ): raise ValueError('The parameter costs should be a list of three integers' ) if len(_UpperCAmelCase ) == 0: return 0 if min(_UpperCAmelCase ) <= 0: raise ValueError('All days elements should be greater than 0' ) if max(_UpperCAmelCase ) >= 366: raise ValueError('All days elements should be less than 366' ) lowerCAmelCase = set(_UpperCAmelCase ) @functools.cache def dynamic_programming(_UpperCAmelCase : int ) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
309
"""simple docstring""" import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () __UpperCamelCase : List[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). __UpperCamelCase : str = [0, 25, 50] __UpperCamelCase : int = [25, 50, 75] __UpperCamelCase : str = fuzz.membership.trimf(X, abca) __UpperCamelCase : Tuple = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. __UpperCamelCase : Dict = np.ones(75) __UpperCamelCase : str = np.zeros((75,)) # 1. Union = max(µA(x), µB(x)) __UpperCamelCase : Optional[Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) __UpperCamelCase : Dict = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) __UpperCamelCase : Dict = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) __UpperCamelCase : List[str] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] __UpperCamelCase : List[str] = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) __UpperCamelCase : Tuple = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] __UpperCamelCase : Union[str, Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] __UpperCamelCase : Dict = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title('''Young''') plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title('''Middle aged''') plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title('''union''') plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title('''intersection''') plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title('''complement_a''') plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title('''difference a/b''') plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title('''alg_sum''') plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title('''alg_product''') plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title('''bdd_sum''') plt.grid(True) plt.subplot(4, 3, 10) plt.plot(X, bdd_difference) plt.title('''bdd_difference''') plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
309
1
"""simple docstring""" import re def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): if len(re.findall('[ATCG]' , _UpperCAmelCase ) ) != len(_UpperCAmelCase ): raise ValueError('Invalid Strand' ) return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) ) if __name__ == "__main__": import doctest doctest.testmod()
309
"""simple docstring""" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[int] , _UpperCAmelCase : str ): lowerCAmelCase = int(_UpperCAmelCase ) # Initialize Result lowerCAmelCase = [] # Traverse through all denomination for denomination in reversed(_UpperCAmelCase ): # Find denominations while int(_UpperCAmelCase ) >= int(_UpperCAmelCase ): total_value -= int(_UpperCAmelCase ) answer.append(_UpperCAmelCase ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": __UpperCamelCase : Any = [] __UpperCamelCase : List[Any] = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): __UpperCamelCase : Any = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(f'''Denomination {i}: ''').strip())) __UpperCamelCase : int = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter __UpperCamelCase : List[str] = [1, 2, 5, 10, 20, 50, 100, 500, 2000] __UpperCamelCase : Any = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(f'''Following is minimal change for {value}: ''') __UpperCamelCase : List[str] = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
309
1
"""simple docstring""" import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class a ( a__ ): # to overwrite at feature extractactor specific tests snake_case__ = None snake_case__ = None @property def UpperCamelCase__ ( self ): """simple docstring""" return self.feat_extract_tester.prepare_feat_extract_dict() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(_snake_case , 'feature_size' ) ) self.assertTrue(hasattr(_snake_case , 'sampling_rate' ) ) self.assertTrue(hasattr(_snake_case , 'padding_value' ) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common() lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) lowerCAmelCase = feat_extract.model_input_names[0] lowerCAmelCase = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(_snake_case ) == len(_snake_case ) for x, y in zip(_snake_case , processed_features[input_name] ) ) ) lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_snake_case ) lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='np' ) lowerCAmelCase = processed_features[input_name] if len(batch_features_input.shape ) < 3: lowerCAmelCase = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_torch def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_snake_case ) lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) lowerCAmelCase = feat_extract.model_input_names[0] lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='pt' ) lowerCAmelCase = processed_features[input_name] if len(batch_features_input.shape ) < 3: lowerCAmelCase = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_tf def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_snake_case ) lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) lowerCAmelCase = feat_extract.model_input_names[0] lowerCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='tf' ) lowerCAmelCase = processed_features[input_name] if len(batch_features_input.shape ) < 3: lowerCAmelCase = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) def UpperCamelCase__ ( self , _snake_case=False ): """simple docstring""" def _inputs_have_equal_length(_snake_case ): lowerCAmelCase = len(input[0] ) for input_slice in input[1:]: if len(_snake_case ) != length: return False return True def _inputs_are_equal(_snake_case , _snake_case ): if len(_snake_case ) != len(_snake_case ): return False for input_slice_a, input_slice_a in zip(_snake_case , _snake_case ): if not np.allclose(np.asarray(_snake_case ) , np.asarray(_snake_case ) , atol=1E-3 ): return False return True lowerCAmelCase = 
self.feature_extraction_class(**self.feat_extract_dict ) lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_snake_case ) lowerCAmelCase = feat_extract.model_input_names[0] lowerCAmelCase = BatchFeature({input_name: speech_inputs} ) lowerCAmelCase = self.feat_extract_tester.seq_length_diff lowerCAmelCase = self.feat_extract_tester.max_seq_length + pad_diff lowerCAmelCase = self.feat_extract_tester.min_seq_length lowerCAmelCase = self.feat_extract_tester.batch_size lowerCAmelCase = self.feat_extract_tester.feature_size # test padding for List[int] + numpy lowerCAmelCase = feat_extract.pad(_snake_case , padding=_snake_case ) lowerCAmelCase = input_a[input_name] lowerCAmelCase = feat_extract.pad(_snake_case , padding='longest' ) lowerCAmelCase = input_a[input_name] lowerCAmelCase = feat_extract.pad(_snake_case , padding='max_length' , max_length=len(speech_inputs[-1] ) ) lowerCAmelCase = input_a[input_name] lowerCAmelCase = feat_extract.pad(_snake_case , padding='longest' , return_tensors='np' ) lowerCAmelCase = input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(_snake_case ): feat_extract.pad(_snake_case , padding='max_length' )[input_name] lowerCAmelCase = feat_extract.pad( _snake_case , padding='max_length' , max_length=_snake_case , return_tensors='np' ) lowerCAmelCase = input_a[input_name] self.assertFalse(_inputs_have_equal_length(_snake_case ) ) self.assertTrue(_inputs_have_equal_length(_snake_case ) ) self.assertTrue(_inputs_have_equal_length(_snake_case ) ) self.assertTrue(_inputs_are_equal(_snake_case , _snake_case ) ) self.assertTrue(len(input_a[0] ) == pad_min_length ) self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff ) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) ) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size ) # test padding for `pad_to_multiple_of` for List[int] + numpy lowerCAmelCase = feat_extract.pad(_snake_case , pad_to_multiple_of=10 ) lowerCAmelCase = input_a[input_name] lowerCAmelCase = feat_extract.pad(_snake_case , padding='longest' , pad_to_multiple_of=10 ) lowerCAmelCase = input_a[input_name] lowerCAmelCase = feat_extract.pad( _snake_case , padding='max_length' , pad_to_multiple_of=10 , max_length=_snake_case ) lowerCAmelCase = input_a[input_name] lowerCAmelCase = feat_extract.pad( _snake_case , padding='max_length' , pad_to_multiple_of=10 , max_length=_snake_case , return_tensors='np' , ) lowerCAmelCase = input_a[input_name] self.assertTrue(all(len(_snake_case ) % 10 == 0 for x in input_a ) ) self.assertTrue(_inputs_are_equal(_snake_case , _snake_case ) ) lowerCAmelCase = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(_snake_case ) == expected_mult_pad_length for x in input_a ) ) self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size ) # Check padding value is correct lowerCAmelCase = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1E-3 ) self.assertTrue( abs( 
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) ) < 1E-3 ) def UpperCamelCase__ ( self , _snake_case=False ): """simple docstring""" def _inputs_have_equal_length(_snake_case ): lowerCAmelCase = len(input[0] ) for input_slice in input[1:]: if len(_snake_case ) != length: return False return True def _inputs_are_equal(_snake_case , _snake_case ): if len(_snake_case ) != len(_snake_case ): return False for input_slice_a, input_slice_a in zip(_snake_case , _snake_case ): if not np.allclose(np.asarray(_snake_case ) , np.asarray(_snake_case ) , atol=1E-3 ): return False return True lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_snake_case ) lowerCAmelCase = feat_extract.model_input_names[0] lowerCAmelCase = BatchFeature({input_name: speech_inputs} ) # truncate to smallest lowerCAmelCase = feat_extract.pad( _snake_case , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=_snake_case ) lowerCAmelCase = input_a[input_name] lowerCAmelCase = feat_extract.pad(_snake_case , padding='max_length' , max_length=len(speech_inputs[0] ) ) lowerCAmelCase = input_a[input_name] self.assertTrue(_inputs_have_equal_length(_snake_case ) ) self.assertFalse(_inputs_have_equal_length(_snake_case ) ) # truncate to smallest with np lowerCAmelCase = feat_extract.pad( _snake_case , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=_snake_case , ) lowerCAmelCase = input_a[input_name] lowerCAmelCase = feat_extract.pad( _snake_case , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' ) lowerCAmelCase = input_a[input_name] self.assertTrue(_inputs_have_equal_length(_snake_case ) ) self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(_snake_case ) ) # truncate to middle lowerCAmelCase = feat_extract.pad( _snake_case , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=_snake_case , return_tensors='np' , ) lowerCAmelCase = input_a[input_name] lowerCAmelCase = feat_extract.pad( _snake_case , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=_snake_case ) lowerCAmelCase = input_a[input_name] lowerCAmelCase = feat_extract.pad( _snake_case , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' ) lowerCAmelCase = input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) ) self.assertTrue(_inputs_have_equal_length(_snake_case ) ) self.assertTrue(_inputs_have_equal_length(_snake_case ) ) self.assertTrue(_inputs_are_equal(_snake_case , _snake_case ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(_snake_case ) ) self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) ) # padding has to be max_length when setting `truncation=True` with self.assertRaises(_snake_case ): feat_extract.pad(_snake_case , truncation=_snake_case 
)[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(_snake_case ): feat_extract.pad(_snake_case , padding='longest' , truncation=_snake_case )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(_snake_case ): feat_extract.pad(_snake_case , padding='longest' , truncation=_snake_case )[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(_snake_case ): feat_extract.pad(_snake_case , padding='max_length' , truncation=_snake_case )[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy lowerCAmelCase = 12 lowerCAmelCase = feat_extract.pad( _snake_case , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_snake_case , truncation=_snake_case , ) lowerCAmelCase = input_a[input_name] lowerCAmelCase = feat_extract.pad( _snake_case , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_snake_case , ) lowerCAmelCase = input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of lowerCAmelCase = len(speech_inputs[0] ) if expected_length % pad_to_multiple_of != 0: lowerCAmelCase = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0] ) == expected_length ) self.assertTrue(_inputs_have_equal_length(_snake_case ) ) self.assertFalse(_inputs_have_equal_length(_snake_case ) ) def UpperCamelCase__ ( self ): """simple docstring""" self._check_padding(numpify=_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" self._check_padding(numpify=_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" self._check_truncation(numpify=_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" self._check_truncation(numpify=_snake_case ) @require_torch def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common() lowerCAmelCase = feat_extract.model_input_names[0] lowerCAmelCase = BatchFeature({input_name: speech_inputs} ) lowerCAmelCase = feat_extract.pad(_snake_case , padding='longest' , return_tensors='np' )[input_name] lowerCAmelCase = feat_extract.pad(_snake_case , padding='longest' , return_tensors='pt' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 ) @require_tf def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common() lowerCAmelCase = feat_extract.model_input_names[0] lowerCAmelCase = BatchFeature({input_name: speech_inputs} ) lowerCAmelCase = feat_extract.pad(_snake_case , padding='longest' , return_tensors='np' )[input_name] lowerCAmelCase = feat_extract.pad(_snake_case , padding='longest' , return_tensors='tf' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.feat_extract_dict lowerCAmelCase = True lowerCAmelCase = self.feature_extraction_class(**_snake_case ) lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common() lowerCAmelCase = [len(_snake_case ) for x in speech_inputs] lowerCAmelCase = feat_extract.model_input_names[0] lowerCAmelCase = 
BatchFeature({input_name: speech_inputs} ) lowerCAmelCase = feat_extract.pad(_snake_case , padding='longest' , return_tensors='np' ) self.assertIn('attention_mask' , _snake_case ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.feat_extract_dict lowerCAmelCase = True lowerCAmelCase = self.feature_extraction_class(**_snake_case ) lowerCAmelCase = self.feat_extract_tester.prepare_inputs_for_common() lowerCAmelCase = [len(_snake_case ) for x in speech_inputs] lowerCAmelCase = feat_extract.model_input_names[0] lowerCAmelCase = BatchFeature({input_name: speech_inputs} ) lowerCAmelCase = min(_snake_case ) lowerCAmelCase = feat_extract.pad( _snake_case , padding='max_length' , max_length=_snake_case , truncation=_snake_case , return_tensors='np' ) self.assertIn('attention_mask' , _snake_case ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
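# The `pad_to_multiple_of` expectations in the tests above follow simple
# ceiling arithmetic; a standalone sketch of that rule:
def expected_padded_length(length: int, multiple: int) -> int:
    # Round length up to the next multiple (matches expected_mult_pad_length).
    return length if length % multiple == 0 else (length // multiple + 1) * multiple


assert expected_padded_length(23, 10) == 30
assert expected_padded_length(30, 10) == 30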
309
"""simple docstring""" from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class a : def __init__( self , _snake_case , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = 13 lowerCAmelCase = 7 lowerCAmelCase = True lowerCAmelCase = True lowerCAmelCase = True lowerCAmelCase = 99 lowerCAmelCase = 32 lowerCAmelCase = 2 lowerCAmelCase = 4 lowerCAmelCase = 37 lowerCAmelCase = 'gelu' lowerCAmelCase = 0.1 lowerCAmelCase = 0.1 lowerCAmelCase = 5_12 lowerCAmelCase = 16 lowerCAmelCase = 2 lowerCAmelCase = 0.02 lowerCAmelCase = 3 lowerCAmelCase = 4 lowerCAmelCase = None def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self ): """simple docstring""" ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = self.prepare_config_and_inputs() lowerCAmelCase = True lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFEsmModel(config=_snake_case ) lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} lowerCAmelCase = model(_snake_case ) lowerCAmelCase = [input_ids, input_mask] lowerCAmelCase = model(_snake_case ) lowerCAmelCase = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" 
lowerCAmelCase = True lowerCAmelCase = TFEsmModel(config=_snake_case ) lowerCAmelCase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'encoder_hidden_states': encoder_hidden_states, 'encoder_attention_mask': encoder_attention_mask, } lowerCAmelCase = model(_snake_case ) lowerCAmelCase = [input_ids, input_mask] lowerCAmelCase = model(_snake_case , encoder_hidden_states=_snake_case ) # Also check the case where encoder outputs are not passed lowerCAmelCase = model(_snake_case , attention_mask=_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFEsmForMaskedLM(config=_snake_case ) lowerCAmelCase = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = TFEsmForTokenClassification(config=_snake_case ) lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} lowerCAmelCase = model(_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class a ( a__ , a__ , unittest.TestCase ): snake_case__ = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) snake_case__ = ( { '''feature-extraction''': TFEsmModel, '''fill-mask''': TFEsmForMaskedLM, '''text-classification''': TFEsmForSequenceClassification, '''token-classification''': TFEsmForTokenClassification, '''zero-shot''': TFEsmForSequenceClassification, } if is_tf_available() else {} ) snake_case__ = False snake_case__ = False def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFEsmModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = TFEsmModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) @unittest.skip('Protein models do 
not support embedding resizing.' ) def UpperCamelCase__ ( self ): """simple docstring""" pass @unittest.skip('Protein models do not support embedding resizing.' ) def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase = model_class(_snake_case ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer lowerCAmelCase = model.get_bias() assert isinstance(_snake_case , _snake_case ) for k, v in name.items(): assert isinstance(_snake_case , tf.Variable ) else: lowerCAmelCase = model.get_output_embeddings() assert x is None lowerCAmelCase = model.get_bias() assert name is None @require_tf class a ( unittest.TestCase ): @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCAmelCase = model(_snake_case )[0] lowerCAmelCase = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) , _snake_case ) # compare the actual values for a slice. lowerCAmelCase = tf.constant( [ [ [8.921_518, -10.589_814, -6.4_671_307], [-6.3_967_156, -13.911_377, -1.1_211_915], [-7.781_247, -13.951_557, -3.740_592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) lowerCAmelCase = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowerCAmelCase = model(_snake_case )[0] # compare the actual values for a slice. lowerCAmelCase = tf.constant( [ [ [0.14_443_092, 0.54_125_327, 0.3_247_739], [0.30_340_484, 0.00_526_676, 0.31_077_722], [0.32_278_043, -0.24_987_096, 0.3_414_628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
309
1
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase : Dict = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys __UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
309
"""simple docstring""" import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) __UpperCamelCase : Dict = '''▁''' __UpperCamelCase : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''} __UpperCamelCase : str = { '''sentencepiece_model_file''': '''sentencepiece.bpe.model''', '''vocab_file''': '''vocab.txt''', } __UpperCamelCase : Tuple = { '''vocab_file''': { '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''', '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''', }, '''sentencepiece_model_file''': { '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''', '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''', }, } __UpperCamelCase : Optional[Any] = { '''ernie-m-base''': 514, '''ernie-m-large''': 514, } __UpperCamelCase : str = { '''ernie-m-base''': {'''do_lower_case''': False}, '''ernie-m-large''': {'''do_lower_case''': False}, } class a ( a__ ): snake_case__ = ["input_ids"] snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_INIT_CONFIGURATION snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = PRETRAINED_VOCAB_FILES_MAP snake_case__ = RESOURCE_FILES_NAMES def __init__( self , _snake_case , _snake_case=None , _snake_case=False , _snake_case="utf8" , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case = None , **_snake_case , ): """simple docstring""" lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , vocab_file=_snake_case , encoding=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , ) lowerCAmelCase = do_lower_case lowerCAmelCase = sentencepiece_model_ckpt lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_snake_case ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: lowerCAmelCase = self.load_vocab(filepath=_snake_case ) else: lowerCAmelCase = {self.sp_model.id_to_piece(_snake_case ): id for id in range(self.sp_model.get_piece_size() )} lowerCAmelCase = {v: k for k, v in self.vocab.items()} def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if text is None: return None lowerCAmelCase = self.tokenize(_snake_case ) lowerCAmelCase ,lowerCAmelCase = '', [] for i, ch in enumerate(_snake_case ): if ch in self.SP_CHAR_MAPPING: lowerCAmelCase = self.SP_CHAR_MAPPING.get(_snake_case ) else: lowerCAmelCase = unicodedata.normalize('NFKC' , _snake_case ) if self.is_whitespace(_snake_case ): continue normalized_text += ch char_mapping.extend([i] * len(_snake_case ) ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = normalized_text, [], 0 if self.do_lower_case: lowerCAmelCase = text.lower() for token in split_tokens: if token[:1] == "▁": lowerCAmelCase = token[1:] lowerCAmelCase = text[offset:].index(_snake_case ) + offset lowerCAmelCase = start + len(_snake_case ) token_mapping.append((char_mapping[start], char_mapping[end - 
1] + 1) ) lowerCAmelCase = end return token_mapping @property def UpperCamelCase__ ( self ): """simple docstring""" return len(self.vocab ) def UpperCamelCase__ ( self ): """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self ): """simple docstring""" lowerCAmelCase = self.__dict__.copy() lowerCAmelCase = None return state def __setstate__( self , _snake_case ): """simple docstring""" lowerCAmelCase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): lowerCAmelCase = {} lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return "".join((self.SP_CHAR_MAPPING.get(_snake_case , _snake_case ) for c in text) ) def UpperCamelCase__ ( self , _snake_case , _snake_case=False , _snake_case=64 , _snake_case=0.1 ): """simple docstring""" if self.sp_model_kwargs.get('enable_sampling' ) is True: lowerCAmelCase = True if self.sp_model_kwargs.get('alpha' ) is not None: lowerCAmelCase = self.sp_model_kwargs.get('alpha' ) if self.sp_model_kwargs.get('nbest_size' ) is not None: lowerCAmelCase = self.sp_model_kwargs.get('nbest_size' ) if not enable_sampling: lowerCAmelCase = self.sp_model.EncodeAsPieces(_snake_case ) else: lowerCAmelCase = self.sp_model.SampleEncodeAsPieces(_snake_case , _snake_case , _snake_case ) lowerCAmelCase = [] for pi, piece in enumerate(_snake_case ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(_snake_case ) and pi != 0: new_pieces.append(_snake_case ) continue else: continue lowerCAmelCase = 0 for i, chunk in enumerate(_snake_case ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(_snake_case ) or self.is_punct(_snake_case ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(_snake_case ) lowerCAmelCase = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowerCAmelCase = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowerCAmelCase = i if len(_snake_case ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip() return out_string def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = self.convert_ids_to_tokens(_snake_case ) lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip() return out_string def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return self.reverse_vocab.get(_snake_case , self.unk_token ) def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] lowerCAmelCase = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + 
[(0, 0)] def UpperCamelCase__ ( self , _snake_case , _snake_case=None , _snake_case=False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1] return [1] + ([0] * len(_snake_case )) + [1] def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" if token_ids_a is None: # [CLS] X [SEP] return (len(_snake_case ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(_snake_case ) + 1) + [1] * (len(_snake_case ) + 3) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if "\u4e00" <= char <= "\u9fff": return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if char in ",;:.?!~,;:。?!《》【】": return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(_snake_case ) == 1: lowerCAmelCase = unicodedata.category(_snake_case ) if cat == "Zs": return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = {} with io.open(_snake_case , 'r' , encoding='utf-8' ) as f: for index, line in enumerate(_snake_case ): lowerCAmelCase = line.rstrip('\n' ) lowerCAmelCase = int(_snake_case ) return token_to_idx def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = 0 if os.path.isdir(_snake_case ): lowerCAmelCase = os.path.join( _snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) else: lowerCAmelCase = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(_snake_case , 'w' , encoding='utf-8' ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda _snake_case : kv[1] ): if index != token_index: logger.warning( F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.' ' Please check that the vocabulary is not corrupted!' ) lowerCAmelCase = token_index writer.write(token + '\n' ) index += 1 lowerCAmelCase = os.path.join(_snake_case , 'sentencepiece.bpe.model' ) with open(_snake_case , 'wb' ) as fi: lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(_snake_case ) return (vocab_file,)
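# The vocab file read by the tokenizer's load_vocab above appears to be one
# token per line, with the line number as the id. A standalone sketch of that
# reader (our reconstruction of the flattened method; the original may differ):
def load_vocab(filepath: str) -> dict:
    token_to_idx = {}
    with open(filepath, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            token_to_idx[line.rstrip("\n")] = index
    return token_to_idx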
309
1
"""simple docstring""" import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class a ( unittest.TestCase ): @property def UpperCamelCase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , ) return model @property def UpperCamelCase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , ) return model @property def UpperCamelCase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModel(_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.dummy_uncond_unet lowerCAmelCase = DDIMScheduler() lowerCAmelCase = self.dummy_vq_model lowerCAmelCase = LDMPipeline(unet=_snake_case , vqvae=_snake_case , scheduler=_snake_case ) ldm.to(_snake_case ) ldm.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = ldm(generator=_snake_case , num_inference_steps=2 , output_type='numpy' ).images lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = ldm(generator=_snake_case , num_inference_steps=2 , output_type='numpy' , return_dict=_snake_case )[0] lowerCAmelCase = image[0, -3:, -3:, -1] lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase = np.array([0.8_512, 0.818, 0.6_411, 0.6_808, 0.4_465, 0.5_618, 0.46, 0.6_231, 0.5_172] ) lowerCAmelCase = 1E-2 if torch_device != 'mps' else 3E-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance @slow @require_torch class a ( unittest.TestCase ): def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' ) ldm.to(_snake_case ) ldm.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = ldm(generator=_snake_case , num_inference_steps=5 , output_type='numpy' ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) lowerCAmelCase = np.array([0.4_399, 0.44_975, 0.46_825, 0.474, 0.4_359, 0.4_581, 0.45_095, 0.4_341, 0.4_447] ) lowerCAmelCase = 1E-2 if torch_device != 'mps' else 3E-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
309
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME __UpperCamelCase : int = ['''small''', '''medium''', '''large'''] __UpperCamelCase : str = '''lm_head.decoder.weight''' __UpperCamelCase : Dict = '''lm_head.weight''' def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ): lowerCAmelCase = torch.load(_UpperCAmelCase ) lowerCAmelCase = d.pop(_UpperCAmelCase ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) torch.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ) if __name__ == "__main__": __UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--dialogpt_path''', default='''.''', type=str) __UpperCamelCase : Optional[int] = parser.parse_args() for MODEL in DIALOGPT_MODELS: __UpperCamelCase : Dict = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''') __UpperCamelCase : str = f'''./DialoGPT-{MODEL}''' convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
309
1
"""simple docstring""" from __future__ import annotations def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] | None = None ): lowerCAmelCase = word_bank or [] # create a table lowerCAmelCase = len(_UpperCAmelCase ) + 1 lowerCAmelCase = [] for _ in range(_UpperCAmelCase ): table.append([] ) # seed value lowerCAmelCase = [[]] # because empty string has empty combination # iterate through the indices for i in range(_UpperCAmelCase ): # condition if table[i] != []: for word in word_bank: # slice condition if target[i : i + len(_UpperCAmelCase )] == word: lowerCAmelCase = [ [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now,push that combination to the table[i+len(word)] table[i + len(_UpperCAmelCase )] += new_combinations # combinations are in reverse order so reverse for better output for combination in table[len(_UpperCAmelCase )]: combination.reverse() return table[len(_UpperCAmelCase )] if __name__ == "__main__": print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa'''])) print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t'''])) print( all_construct( '''hexagonosaurus''', ['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''], ) )
309
"""simple docstring""" __UpperCamelCase : Dict = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} __UpperCamelCase : str = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ): lowerCAmelCase = True lowerCAmelCase = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) order.append(_UpperCAmelCase ) return order def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ): lowerCAmelCase = True lowerCAmelCase = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return component def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] ): lowerCAmelCase = len(_UpperCAmelCase ) * [False] lowerCAmelCase = {vert: [] for vert in range(len(_UpperCAmelCase ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(_UpperCAmelCase ) lowerCAmelCase = [] for i, was_visited in enumerate(_UpperCAmelCase ): if not was_visited: order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = [] lowerCAmelCase = len(_UpperCAmelCase ) * [False] for i in range(len(_UpperCAmelCase ) ): lowerCAmelCase = order[len(_UpperCAmelCase ) - i - 1] if not visited[vert]: lowerCAmelCase = find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) components_list.append(_UpperCAmelCase ) return components_list
309
1
"""simple docstring""" import logging import os import threading import time try: import warnings except ImportError: __UpperCamelCase : Dict = None try: import msvcrt except ImportError: __UpperCamelCase : Optional[int] = None try: import fcntl except ImportError: __UpperCamelCase : Dict = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: __UpperCamelCase : str = OSError # Data # ------------------------------------------------ __UpperCamelCase : List[str] = [ '''Timeout''', '''BaseFileLock''', '''WindowsFileLock''', '''UnixFileLock''', '''SoftFileLock''', '''FileLock''', ] __UpperCamelCase : Union[str, Any] = '''3.0.12''' __UpperCamelCase : List[Any] = None def _SCREAMING_SNAKE_CASE (): global _logger lowerCAmelCase = _logger or logging.getLogger(__name__ ) return _logger class a ( a__ ): def __init__( self , _snake_case ): """simple docstring""" lowerCAmelCase = lock_file return None def __str__( self ): """simple docstring""" lowerCAmelCase = F'The file lock \'{self.lock_file}\' could not be acquired.' return temp class a : def __init__( self , _snake_case ): """simple docstring""" lowerCAmelCase = lock return None def __enter__( self ): """simple docstring""" return self.lock def __exit__( self , _snake_case , _snake_case , _snake_case ): """simple docstring""" self.lock.release() return None class a : def __init__( self , _snake_case , _snake_case=-1 , _snake_case=None ): """simple docstring""" lowerCAmelCase = max_filename_length if max_filename_length is not None else 2_55 # Hash the filename if it's too long lowerCAmelCase = self.hash_filename_if_too_long(_snake_case , _snake_case ) # The path to the lock file. lowerCAmelCase = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. lowerCAmelCase = None # The default timeout value. lowerCAmelCase = timeout # We use this lock primarily for the lock counter. lowerCAmelCase = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. lowerCAmelCase = 0 return None @property def UpperCamelCase__ ( self ): """simple docstring""" return self._lock_file @property def UpperCamelCase__ ( self ): """simple docstring""" return self._timeout @timeout.setter def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = float(_snake_case ) return None def UpperCamelCase__ ( self ): """simple docstring""" raise NotImplementedError() def UpperCamelCase__ ( self ): """simple docstring""" raise NotImplementedError() @property def UpperCamelCase__ ( self ): """simple docstring""" return self._lock_file_fd is not None def UpperCamelCase__ ( self , _snake_case=None , _snake_case=0.05 ): """simple docstring""" if timeout is None: lowerCAmelCase = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 lowerCAmelCase = id(self ) lowerCAmelCase = self._lock_file lowerCAmelCase = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F'Attempting to acquire lock {lock_id} on {lock_filename}' ) self._acquire() if self.is_locked: logger().debug(F'Lock {lock_id} acquired on {lock_filename}' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F'Timeout on acquiring lock {lock_id} on {lock_filename}' ) raise Timeout(self._lock_file ) else: logger().debug( F'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...' ) time.sleep(_snake_case ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: lowerCAmelCase = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def UpperCamelCase__ ( self , _snake_case=False ): """simple docstring""" with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: lowerCAmelCase = id(self ) lowerCAmelCase = self._lock_file logger().debug(F'Attempting to release lock {lock_id} on {lock_filename}' ) self._release() lowerCAmelCase = 0 logger().debug(F'Lock {lock_id} released on {lock_filename}' ) return None def __enter__( self ): """simple docstring""" self.acquire() return self def __exit__( self , _snake_case , _snake_case , _snake_case ): """simple docstring""" self.release() return None def __del__( self ): """simple docstring""" self.release(force=_snake_case ) return None def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = os.path.basename(_snake_case ) if len(_snake_case ) > max_length and max_length > 0: lowerCAmelCase = os.path.dirname(_snake_case ) lowerCAmelCase = str(hash(_snake_case ) ) lowerCAmelCase = filename[: max_length - len(_snake_case ) - 8] + '...' + hashed_filename + '.lock' return os.path.join(_snake_case , _snake_case ) else: return path class a ( a__ ): def __init__( self , _snake_case , _snake_case=-1 , _snake_case=None ): """simple docstring""" from .file_utils import relative_to_absolute_path super().__init__(_snake_case , timeout=_snake_case , max_filename_length=_snake_case ) lowerCAmelCase = '\\\\?\\' + relative_to_absolute_path(self.lock_file ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: lowerCAmelCase = os.open(self._lock_file , _snake_case ) except OSError: pass else: try: msvcrt.locking(_snake_case , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(_snake_case ) else: lowerCAmelCase = fd return None def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self._lock_file_fd lowerCAmelCase = None msvcrt.locking(_snake_case , msvcrt.LK_UNLCK , 1 ) os.close(_snake_case ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class a ( a__ ): def __init__( self , _snake_case , _snake_case=-1 , _snake_case=None ): """simple docstring""" lowerCAmelCase = os.statvfs(os.path.dirname(_snake_case ) ).f_namemax super().__init__(_snake_case , timeout=_snake_case , max_filename_length=_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC lowerCAmelCase = os.open(self._lock_file , _snake_case ) try: fcntl.flock(_snake_case , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(_snake_case ) else: lowerCAmelCase = fd return None def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self._lock_file_fd lowerCAmelCase = None fcntl.flock(_snake_case , fcntl.LOCK_UN ) os.close(_snake_case ) return None class a ( a__ ): def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: lowerCAmelCase = os.open(self._lock_file , _snake_case ) except OSError: pass else: lowerCAmelCase = fd return None def UpperCamelCase__ ( self ): """simple docstring""" os.close(self._lock_file_fd ) lowerCAmelCase = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None __UpperCamelCase : Any = None if msvcrt: __UpperCamelCase : List[Any] = WindowsFileLock elif fcntl: __UpperCamelCase : int = UnixFileLock else: __UpperCamelCase : Tuple = SoftFileLock if warnings is not None: warnings.warn('''only soft file lock is available''')
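# Usage sketch for the lock API defined above.  Illustrative only: it
# assumes the module is saved as `filelock.py` and that the masked
# attribute names match the upstream package (acquire/release/Timeout);
# "app.lock" is an arbitrary example path.
if __name__ == "__main__":
    from filelock import FileLock, Timeout

    lock = FileLock("app.lock", timeout=1)
    with lock:  # __enter__ calls acquire(), __exit__ calls release()
        # Re-entering only bumps the per-object counter; the OS-level
        # lock is released when the counter drops back to zero.
        with lock:
            pass
        # A second lock object on the same file contends for the OS-level
        # lock and raises Timeout once its timeout elapses (on platforms
        # where a second descriptor conflicts, as Linux flock does).
        contender = FileLock("app.lock", timeout=0.1)
        try:
            contender.acquire()
        except Timeout as err:
            print(err)  # "The file lock 'app.lock' could not be acquired."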
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    '''stable diffusion controlnet''',
    '''0.22.0''',
    '''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
    standard_warn=False,
    stacklevel=3,
)
"""simple docstring"""
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip('./')


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md('''.''')
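# Quick demonstration of print_directory_md above on a throwaway tree; the
# file names are arbitrary and the expected output is shown in comments.
if __name__ == "__main__":
    import tempfile

    cwd = os.getcwd()
    with tempfile.TemporaryDirectory() as tmp:
        os.makedirs(os.path.join(tmp, "sorts"))
        open(os.path.join(tmp, "sorts", "bubble_sort.py"), "w").close()
        open(os.path.join(tmp, "README.md"), "w").close()  # skipped: not .py/.ipynb
        os.chdir(tmp)  # relative paths keep the generated links clean
        print_directory_md(".")
        # ## Sorts
        #   * [Bubble Sort](sorts/bubble_sort.py)
        os.chdir(cwd)  # restore before the directory is cleaned up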
"""simple docstring""" from unittest.mock import Mock, patch from file_transfer.send_file import send_file @patch('socket.socket' ) @patch('builtins.open' ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] ): # ===== initialization ===== lowerCAmelCase = Mock() lowerCAmelCase = conn, Mock() lowerCAmelCase = iter([1, None] ) lowerCAmelCase = lambda _UpperCAmelCase : next(_UpperCAmelCase ) # ===== invoke ===== send_file(filename='mytext.txt' , testing=_UpperCAmelCase ) # ===== ensurance ===== sock.assert_called_once() sock.return_value.bind.assert_called_once() sock.return_value.listen.assert_called_once() sock.return_value.accept.assert_called_once() conn.recv.assert_called_once() file.return_value.__enter__.assert_called_once() file.return_value.__enter__.return_value.read.assert_called() conn.send.assert_called_once() conn.close.assert_called_once() sock.return_value.shutdown.assert_called_once() sock.return_value.close.assert_called_once()
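# The test above pins down only a call sequence: bind -> listen -> accept,
# one recv, chunked reads from the opened file pushed over the connection,
# then shutdown/close.  Below is ONE plausible send_file consistent with
# those assertions -- an assumption for illustration, not the tested
# module's actual source; the port number is arbitrary.
import socket


def send_file_sketch(filename: str = "mytext.txt", testing: bool = False) -> None:
    # `testing` is kept only to mirror the tested signature.
    port = 12312
    sock = socket.socket()
    sock.bind(("localhost", port))
    sock.listen(1)
    conn, _addr = sock.accept()
    conn.recv(1024)  # wait for the client to announce itself
    with open(filename, "rb") as in_file:
        data = in_file.read(1024)
        while data:  # the mocked read() yields one chunk, then a falsy value
            conn.send(data)
            data = in_file.read(1024)
    conn.close()
    sock.shutdown(1)
    sock.close()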
"""simple docstring""" import os from datetime import datetime as dt from github import Github __UpperCamelCase : int = [ '''good first issue''', '''good second issue''', '''good difficult issue''', '''enhancement''', '''new pipeline/model''', '''new scheduler''', '''wip''', ] def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = Github(os.environ['GITHUB_TOKEN'] ) lowerCAmelCase = g.get_repo('huggingface/diffusers' ) lowerCAmelCase = repo.get_issues(state='open' ) for issue in open_issues: lowerCAmelCase = sorted(issue.get_comments() , key=lambda _UpperCAmelCase : i.created_at , reverse=_UpperCAmelCase ) lowerCAmelCase = comments[0] if len(_UpperCAmelCase ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state='closed' ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state='open' ) issue.remove_from_labels('stale' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( 'This issue has been automatically marked as stale because it has not had ' 'recent activity. If you think this still needs to be addressed ' 'please comment on this thread.\n\nPlease note that issues that do not follow the ' '[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) ' 'are likely to be ignored.' ) issue.add_to_labels('stale' ) if __name__ == "__main__": main()
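# The three branches above hinge on two ages measured in days.  A tiny,
# self-contained rehearsal of that arithmetic (the dates are made up):
from datetime import datetime, timedelta

now = datetime(2023, 6, 1)
updated_at = now - timedelta(days=24)  # last activity 24 days ago
created_at = now - timedelta(days=90)  # issue opened 90 days ago

inactive_days = (now - updated_at).days
age_days = (now - created_at).days

# The notification branch fires after more than 23 days of inactivity on
# an issue that is at least 30 days old ...
should_warn = inactive_days > 23 and age_days >= 30
# ... and the closing branch fires once inactivity passes 7 days after
# the bot's own comment (approximated here by the same inactivity count).
should_close_after_notice = inactive_days > 7

print(should_warn, should_close_after_notice)  # True True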
"""simple docstring""" import os from typing import Dict, List, Tuple, TypeVar, Union __UpperCamelCase : Any = TypeVar('''T''') __UpperCamelCase : Dict = Union[List[T], Tuple[T, ...]] __UpperCamelCase : str = Union[T, List[T], Dict[str, T]] __UpperCamelCase : Optional[Any] = Union[str, bytes, os.PathLike]
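# Illustration of how these aliases read in practice.  The readable names
# (T, NestedDataStructureLike) follow the upstream `datasets` typing
# module; the de-identified listing above masks them.
from typing import Dict, List, TypeVar, Union

T = TypeVar("T")
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]


def count_leaves(data: NestedDataStructureLike) -> int:
    # A scalar, a list of values, or a dict of values all type-check.
    if isinstance(data, dict):
        return sum(count_leaves(v) for v in data.values())
    if isinstance(data, (list, tuple)):
        return sum(count_leaves(v) for v in data)
    return 1


assert count_leaves({"a": [1, 2], "b": 3}) == 3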
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __UpperCamelCase : Any = { '''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''], '''processing_layoutlmv2''': ['''LayoutLMv2Processor'''], '''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = ['''LayoutLMv2TokenizerFast'''] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[int] = ['''LayoutLMv2FeatureExtractor'''] __UpperCamelCase : Optional[int] = ['''LayoutLMv2ImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Any = [ '''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LayoutLMv2ForQuestionAnswering''', '''LayoutLMv2ForSequenceClassification''', '''LayoutLMv2ForTokenClassification''', '''LayoutLMv2Layer''', '''LayoutLMv2Model''', '''LayoutLMv2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
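# The __init__ above defers heavy imports through _LazyModule.  This is
# not transformers' implementation, just the core idea in miniature:
# resolve attributes from submodules on first access via a module-level
# __getattr__ (PEP 562).  The structure dict is shortened for the demo.
import importlib
from typing import Any

_DEMO_IMPORT_STRUCTURE = {
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
}
_ATTR_TO_MODULE = {
    attr: module for module, attrs in _DEMO_IMPORT_STRUCTURE.items() for attr in attrs
}


def __getattr__(name: str) -> Any:  # consulted only for missing attributes
    if name in _ATTR_TO_MODULE:
        submodule = importlib.import_module(f".{_ATTR_TO_MODULE[name]}", __package__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")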
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCamelCase : Any = { '''configuration_chinese_clip''': [ '''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ChineseCLIPConfig''', '''ChineseCLIPOnnxConfig''', '''ChineseCLIPTextConfig''', '''ChineseCLIPVisionConfig''', ], '''processing_chinese_clip''': ['''ChineseCLIPProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : str = ['''ChineseCLIPFeatureExtractor'''] __UpperCamelCase : List[str] = ['''ChineseCLIPImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Any = [ '''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ChineseCLIPModel''', '''ChineseCLIPPreTrainedModel''', '''ChineseCLIPTextModel''', '''ChineseCLIPVisionModel''', ] if TYPE_CHECKING: from .configuration_chinese_clip import ( CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, ChineseCLIPConfig, ChineseCLIPOnnxConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig, ) from .processing_chinese_clip import ChineseCLIPProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_chinese_clip import ( CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, ChineseCLIPModel, ChineseCLIPPreTrainedModel, ChineseCLIPTextModel, ChineseCLIPVisionModel, ) else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) class a ( a__ ): def __init__( self , *_snake_case , **_snake_case ): """simple docstring""" warnings.warn( 'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use PoolFormerImageProcessor instead.' , _snake_case , ) super().__init__(*_snake_case , **_snake_case )
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class a ( a__ ): snake_case__ = (IPNDMScheduler,) snake_case__ = (('''num_inference_steps''', 5_0),) def UpperCamelCase__ ( self , **_snake_case ): """simple docstring""" lowerCAmelCase = {'num_train_timesteps': 10_00} config.update(**_snake_case ) return config def UpperCamelCase__ ( self , _snake_case=0 , **_snake_case ): """simple docstring""" lowerCAmelCase = dict(self.forward_default_kwargs ) lowerCAmelCase = kwargs.pop('num_inference_steps' , _snake_case ) lowerCAmelCase = self.dummy_sample lowerCAmelCase = 0.1 * sample lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowerCAmelCase = self.get_scheduler_config(**_snake_case ) lowerCAmelCase = scheduler_class(**_snake_case ) scheduler.set_timesteps(_snake_case ) # copy over dummy past residuals lowerCAmelCase = dummy_past_residuals[:] if time_step is None: lowerCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_snake_case ) lowerCAmelCase = scheduler_class.from_pretrained(_snake_case ) new_scheduler.set_timesteps(_snake_case ) # copy over dummy past residuals lowerCAmelCase = dummy_past_residuals[:] lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample lowerCAmelCase = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample lowerCAmelCase = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self , _snake_case=0 , **_snake_case ): """simple docstring""" lowerCAmelCase = dict(self.forward_default_kwargs ) lowerCAmelCase = kwargs.pop('num_inference_steps' , _snake_case ) lowerCAmelCase = self.dummy_sample lowerCAmelCase = 0.1 * sample lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**_snake_case ) scheduler.set_timesteps(_snake_case ) # copy over dummy past residuals (must be after setting timesteps) lowerCAmelCase = dummy_past_residuals[:] if time_step is None: lowerCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_snake_case ) lowerCAmelCase = scheduler_class.from_pretrained(_snake_case ) # copy over dummy past residuals new_scheduler.set_timesteps(_snake_case ) # copy over dummy past residual (must be after setting timesteps) lowerCAmelCase = dummy_past_residuals[:] lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample lowerCAmelCase = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample lowerCAmelCase = 
new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase__ ( self , **_snake_case ): """simple docstring""" lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config(**_snake_case ) lowerCAmelCase = scheduler_class(**_snake_case ) lowerCAmelCase = 10 lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(_snake_case ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase = model(_snake_case , _snake_case ) lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case ).prev_sample for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase = model(_snake_case , _snake_case ) lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case ).prev_sample return sample def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = dict(self.forward_default_kwargs ) lowerCAmelCase = kwargs.pop('num_inference_steps' , _snake_case ) for scheduler_class in self.scheduler_classes: lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**_snake_case ) lowerCAmelCase = self.dummy_sample lowerCAmelCase = 0.1 * sample if num_inference_steps is not None and hasattr(_snake_case , 'set_timesteps' ): scheduler.set_timesteps(_snake_case ) elif num_inference_steps is not None and not hasattr(_snake_case , 'set_timesteps' ): lowerCAmelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] lowerCAmelCase = dummy_past_residuals[:] lowerCAmelCase = scheduler.timesteps[5] lowerCAmelCase = scheduler.timesteps[6] lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCamelCase__ ( self ): """simple docstring""" for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=_snake_case , time_step=_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=_snake_case , time_step=_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.full_loop() lowerCAmelCase = torch.mean(torch.abs(_snake_case ) ) assert abs(result_mean.item() - 2_54_05_29 ) < 10
"""simple docstring""" from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. __UpperCamelCase : str = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. __UpperCamelCase : Optional[Any] = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. __UpperCamelCase : Dict = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ): lowerCAmelCase = len([g for position, g in enumerate(_UpperCAmelCase ) if g == main_target[position]] ) return (item, float(_UpperCAmelCase )) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ): lowerCAmelCase = random.randint(0 , len(_UpperCAmelCase ) - 1 ) lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:] lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] ): lowerCAmelCase = list(_UpperCAmelCase ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: lowerCAmelCase = random.choice(_UpperCAmelCase ) return "".join(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : tuple[str, float] , _UpperCAmelCase : list[tuple[str, float]] , _UpperCAmelCase : list[str] , ): lowerCAmelCase = [] # Generate more children proportionally to the fitness score. lowerCAmelCase = int(parent_a[1] * 100 ) + 1 lowerCAmelCase = 10 if child_n >= 10 else child_n for _ in range(_UpperCAmelCase ): lowerCAmelCase = population_score[random.randint(0 , _UpperCAmelCase )][0] lowerCAmelCase ,lowerCAmelCase = crossover(parent_a[0] , _UpperCAmelCase ) # Append new string to the population list. pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) ) pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) ) return pop def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] , _UpperCAmelCase : bool = True ): # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: lowerCAmelCase = F'{N_POPULATION} must be bigger than {N_SELECTED}' raise ValueError(_UpperCAmelCase ) # Verify that the target contains no genes besides the ones inside genes variable. lowerCAmelCase = sorted({c for c in target if c not in genes} ) if not_in_genes_list: lowerCAmelCase = F'{not_in_genes_list} is not in genes list, evolution cannot converge' raise ValueError(_UpperCAmelCase ) # Generate random starting population. lowerCAmelCase = [] for _ in range(_UpperCAmelCase ): population.append(''.join([random.choice(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) )] ) ) # Just some logs to know what the algorithms is doing. lowerCAmelCase ,lowerCAmelCase = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(_UpperCAmelCase ) # Random population created. Now it's time to evaluate. 
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        lowerCAmelCase = [evaluate(_UpperCAmelCase , _UpperCAmelCase ) for item in population]

        # Check if there is a matching evolution.
        lowerCAmelCase = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] , reverse=_UpperCAmelCase )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                F'\nGeneration: {generation}'
                F'\nTotal Population:{total_population}'
                F'\nBest score: {population_score[0][1]}'
                F'\nBest string: {population_score[0][0]}' )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping these avoids regression of the evolution.
        lowerCAmelCase = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(_UpperCAmelCase )
        # Normalize population score to be between 0 and 1.
        lowerCAmelCase = [
            (item, score / len(_UpperCAmelCase )) for item, score in population_score
        ]

        # This is the selection step.
        for i in range(_UpperCAmelCase ):
            population.extend(select(population_score[int(_UpperCAmelCase )] , _UpperCAmelCase , _UpperCAmelCase ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle.  If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also compute small strings in
            # far fewer generations.
            if len(_UpperCAmelCase ) > N_POPULATION:
                break


if __name__ == "__main__":
    __UpperCamelCase : Tuple = (
        '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
    )
    __UpperCamelCase : str = list(
        ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
        '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
    )
    __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase : Dict = basic(target_str, genes_list)
    print(
        f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
    )
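# The de-identified listing above masks names and parameters, so here is a
# readable restatement of the two core primitives, small enough to check
# by hand (the names follow the call sites above: evaluate, crossover).
import random
from typing import Tuple


def evaluate_demo(item: str, main_target: str) -> Tuple[str, float]:
    # Position-wise match count, as in the evaluate() calls above.
    score = len([g for pos, g in enumerate(item) if g == main_target[pos]])
    return item, float(score)


def crossover_demo(parent_1: str, parent_2: str) -> Tuple[str, str]:
    # Single-point splice: swap the tails of two parents at a random cut.
    cut = random.randint(0, len(parent_1) - 1)
    return parent_1[:cut] + parent_2[cut:], parent_2[:cut] + parent_1[cut:]


assert evaluate_demo("cot", "cat") == ("cot", 2.0)  # 'c' and 't' match
child_1, child_2 = crossover_demo("aaaa", "bbbb")
assert sorted(child_1 + child_2) == sorted("aaaabbbb")  # genes are conserved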
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType __UpperCamelCase : Any = logging.get_logger(__name__) __UpperCamelCase : List[str] = { '''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''', } # fmt: off __UpperCamelCase : Tuple = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 357, 366, 438, 532, 685, 705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377, 1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211, 4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786, 1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791, 1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409, 3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361 ] __UpperCamelCase : Optional[Any] = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627, 3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647, 7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793, 1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675, 2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865, 4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362 ] class a ( a__ ): snake_case__ = '''whisper''' snake_case__ = ['''past_key_values'''] snake_case__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self , _snake_case=5_18_65 , _snake_case=80 , _snake_case=6 , _snake_case=4 , _snake_case=6 , _snake_case=4 , _snake_case=15_36 , _snake_case=15_36 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=5_02_57 , _snake_case=True , _snake_case=True , _snake_case="gelu" , _snake_case=2_56 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=False , _snake_case=15_00 , _snake_case=4_48 , _snake_case=5_02_56 , _snake_case=5_02_56 , _snake_case=5_02_56 , _snake_case=None , _snake_case=[2_20, 5_02_56] , _snake_case=False , _snake_case=2_56 , _snake_case=False , _snake_case=0.05 , _snake_case=10 , _snake_case=2 , _snake_case=0.0 , _snake_case=10 , _snake_case=0 , _snake_case=7 , **_snake_case , ): """simple docstring""" lowerCAmelCase = vocab_size lowerCAmelCase = num_mel_bins lowerCAmelCase = d_model lowerCAmelCase = encoder_layers lowerCAmelCase = encoder_attention_heads lowerCAmelCase = decoder_layers lowerCAmelCase = decoder_attention_heads lowerCAmelCase = decoder_ffn_dim lowerCAmelCase = encoder_ffn_dim lowerCAmelCase = dropout lowerCAmelCase = attention_dropout lowerCAmelCase = activation_dropout lowerCAmelCase = activation_function lowerCAmelCase = init_std lowerCAmelCase = encoder_layerdrop lowerCAmelCase = decoder_layerdrop lowerCAmelCase = use_cache lowerCAmelCase = encoder_layers lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True lowerCAmelCase = max_source_positions lowerCAmelCase = max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. 
lowerCAmelCase = classifier_proj_size lowerCAmelCase = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCAmelCase = apply_spec_augment lowerCAmelCase = mask_time_prob lowerCAmelCase = mask_time_length lowerCAmelCase = mask_time_min_masks lowerCAmelCase = mask_feature_prob lowerCAmelCase = mask_feature_length lowerCAmelCase = mask_feature_min_masks lowerCAmelCase = median_filter_width super().__init__( pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , is_encoder_decoder=_snake_case , decoder_start_token_id=_snake_case , suppress_tokens=_snake_case , begin_suppress_tokens=_snake_case , **_snake_case , ) class a ( a__ ): @property def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = OrderedDict( [ ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}), ] ) if self.use_past: lowerCAmelCase = {0: 'batch'} else: lowerCAmelCase = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(_snake_case , direction='inputs' ) return common_inputs def UpperCamelCase__ ( self , _snake_case , _snake_case = -1 , _snake_case = -1 , _snake_case = False , _snake_case = None , _snake_case = 2_20_50 , _snake_case = 5.0 , _snake_case = 2_20 , ): """simple docstring""" lowerCAmelCase = OrderedDict() lowerCAmelCase = OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=_snake_case , framework=_snake_case , sampling_rate=_snake_case , time_duration=_snake_case , frequency=_snake_case , ) lowerCAmelCase = encoder_inputs['input_features'].shape[2] lowerCAmelCase = encoder_sequence_length // 2 if self.use_past else seq_length lowerCAmelCase = super().generate_dummy_inputs( preprocessor.tokenizer , _snake_case , _snake_case , _snake_case , _snake_case ) lowerCAmelCase = encoder_inputs.pop('input_features' ) lowerCAmelCase = decoder_inputs.pop('decoder_input_ids' ) if "past_key_values" in decoder_inputs: lowerCAmelCase = decoder_inputs.pop('past_key_values' ) return dummy_inputs @property def UpperCamelCase__ ( self ): """simple docstring""" return 1E-3
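# Where the max_source_positions=1500 default above comes from: Whisper's
# feature extractor produces log-mel frames at a 10 ms hop over 30 s
# windows, and the encoder's second conv layer has stride 2 (figures per
# the Whisper paper; treat them as assumptions, they are not stated above).
audio_seconds = 30
frames_per_second = 100                              # 10 ms hop
feature_frames = audio_seconds * frames_per_second   # 3000 mel frames
encoder_positions = feature_frames // 2              # stride-2 conv halves it
assert encoder_positions == 1500                     # == max_source_positions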
"""simple docstring""" import copy import os import cva import numpy as np from matplotlib import pyplot as plt class a : def __init__( self ): """simple docstring""" lowerCAmelCase = '' lowerCAmelCase = '' lowerCAmelCase = [] lowerCAmelCase = 0 lowerCAmelCase = 2_56 lowerCAmelCase = 0 lowerCAmelCase = 0 lowerCAmelCase = 0 lowerCAmelCase = 0 def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = cva.imread(_snake_case , 0 ) lowerCAmelCase = copy.deepcopy(self.img ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='x' ) lowerCAmelCase = np.sum(_snake_case ) for i in range(len(_snake_case ) ): lowerCAmelCase = x[i] / self.k self.sk += prk lowerCAmelCase = (self.L - 1) * self.sk if self.rem != 0: lowerCAmelCase = int(last % last ) lowerCAmelCase = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(_snake_case ) lowerCAmelCase = int(np.ma.count(self.img ) / self.img[1].size ) lowerCAmelCase = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): lowerCAmelCase = self.img[j][i] if num != self.last_list[num]: lowerCAmelCase = self.last_list[num] cva.imwrite('output_data/output.jpg' , self.img ) def UpperCamelCase__ ( self ): """simple docstring""" plt.hist(self.img.ravel() , 2_56 , [0, 2_56] ) def UpperCamelCase__ ( self ): """simple docstring""" cva.imshow('Output-Image' , self.img ) cva.imshow('Input-Image' , self.original_image ) cva.waitKey(50_00 ) cva.destroyAllWindows() if __name__ == "__main__": __UpperCamelCase : int = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''') __UpperCamelCase : List[Any] = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
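# The stretch above is histogram equalization: accumulate the normalized
# histogram into a CDF (the running `sk`) and scale by (L - 1) to get the
# new gray level.  A tiny numpy rehearsal of the same mapping:
import numpy as np

img = np.array([[0, 0, 1], [1, 2, 3]])         # toy 2x3 image, levels 0..3
L = 4                                          # number of gray levels
hist = np.bincount(img.ravel(), minlength=L)   # [2, 2, 1, 1]
cdf = np.cumsum(hist) / img.size               # [0.33, 0.67, 0.83, 1.0]
mapping = np.round((L - 1) * cdf).astype(int)  # [1, 2, 2, 3]
print(mapping[img])                            # [[1 1 2] [2 2 3]]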
"""simple docstring""" import collections import importlib.util import os import re from pathlib import Path __UpperCamelCase : int = '''src/transformers''' # Matches is_xxx_available() __UpperCamelCase : str = re.compile(R'''is\_([a-z_]*)_available()''') # Catches a one-line _import_struct = {xxx} __UpperCamelCase : Tuple = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __UpperCamelCase : Optional[Any] = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''') # Catches a line if not is_foo_available __UpperCamelCase : int = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''') # Catches a line _import_struct["bla"].append("foo") __UpperCamelCase : Optional[Any] = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __UpperCamelCase : Dict = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''') # Catches a line with an object between quotes and a comma: "MyModel", __UpperCamelCase : Optional[Any] = re.compile('''^\s+"([^"]+)",''') # Catches a line with objects between brackets only: ["foo", "bar"], __UpperCamelCase : Optional[int] = re.compile('''^\s+\[([^\]]+)\]''') # Catches a line with from foo import bar, bla, boo __UpperCamelCase : Dict = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') # Catches a line with try: __UpperCamelCase : Any = re.compile(R'''^\s*try:''') # Catches a line with else: __UpperCamelCase : Union[str, Any] = re.compile(R'''^\s*else:''') def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any ): if _re_test_backend.search(_UpperCAmelCase ) is None: return None lowerCAmelCase = [b[0] for b in _re_backend.findall(_UpperCAmelCase )] backends.sort() return "_and_".join(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] ): with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f: lowerCAmelCase = f.readlines() lowerCAmelCase = 0 while line_index < len(_UpperCAmelCase ) and not lines[line_index].startswith('_import_structure = {' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_UpperCAmelCase ): return None # First grab the objects without a specific backend in _import_structure lowerCAmelCase = [] while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None: lowerCAmelCase = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_UpperCAmelCase ): lowerCAmelCase = _re_one_line_import_struct.search(_UpperCAmelCase ).groups()[0] lowerCAmelCase = re.findall('\[([^\]]+)\]' , _UpperCAmelCase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(', ' )] ) line_index += 1 continue lowerCAmelCase = _re_import_struct_key_value.search(_UpperCAmelCase ) if single_line_import_search is not None: lowerCAmelCase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(_UpperCAmelCase ) > 0] objects.extend(_UpperCAmelCase ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) line_index += 1 lowerCAmelCase = {'none': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('if TYPE_CHECKING' ): # If the line is an if not is_backend_available, we grab all objects associated. 
lowerCAmelCase = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ): lowerCAmelCase = lines[line_index] if _re_import_struct_add_one.search(_UpperCAmelCase ) is not None: objects.append(_re_import_struct_add_one.search(_UpperCAmelCase ).groups()[0] ) elif _re_import_struct_add_many.search(_UpperCAmelCase ) is not None: lowerCAmelCase = _re_import_struct_add_many.search(_UpperCAmelCase ).groups()[0].split(', ' ) lowerCAmelCase = [obj[1:-1] for obj in imports if len(_UpperCAmelCase ) > 0] objects.extend(_UpperCAmelCase ) elif _re_between_brackets.search(_UpperCAmelCase ) is not None: lowerCAmelCase = _re_between_brackets.search(_UpperCAmelCase ).groups()[0].split(', ' ) lowerCAmelCase = [obj[1:-1] for obj in imports if len(_UpperCAmelCase ) > 0] objects.extend(_UpperCAmelCase ) elif _re_quote_object.search(_UpperCAmelCase ) is not None: objects.append(_re_quote_object.search(_UpperCAmelCase ).groups()[0] ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) elif line.startswith(' ' * 12 + '"' ): objects.append(line[13:-3] ) line_index += 1 lowerCAmelCase = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend lowerCAmelCase = [] while ( line_index < len(_UpperCAmelCase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('else' ) ): lowerCAmelCase = lines[line_index] lowerCAmelCase = _re_import.search(_UpperCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 8 ): objects.append(line[8:-2] ) line_index += 1 lowerCAmelCase = {'none': objects} # Let's continue with backend-specific objects while line_index < len(_UpperCAmelCase ): # If the line is an if is_backend_available, we grab all objects associated. 
lowerCAmelCase = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ): lowerCAmelCase = lines[line_index] lowerCAmelCase = _re_import.search(_UpperCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 12 ): objects.append(line[12:-2] ) line_index += 1 lowerCAmelCase = objects else: line_index += 1 return import_dict_objects, type_hint_objects def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ): def find_duplicates(_UpperCAmelCase : Any ): return [k for k, v in collections.Counter(_UpperCAmelCase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] lowerCAmelCase = [] for key in import_dict_objects.keys(): lowerCAmelCase = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' ) lowerCAmelCase = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): lowerCAmelCase = 'base imports' if key == 'none' else F'{key} backend' errors.append(F'Differences for {name}:' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F' {a} in TYPE_HINT but not in _import_structure.' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F' {a} in _import_structure but not in TYPE_HINT.' ) return errors def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = [] for root, _, files in os.walk(_UpperCAmelCase ): if "__init__.py" in files: lowerCAmelCase = os.path.join(_UpperCAmelCase , '__init__.py' ) lowerCAmelCase = parse_init(_UpperCAmelCase ) if objects is not None: lowerCAmelCase = analyze_results(*_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: lowerCAmelCase = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}' failures.append('\n'.join(_UpperCAmelCase ) ) if len(_UpperCAmelCase ) > 0: raise ValueError('\n\n'.join(_UpperCAmelCase ) ) def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = [] for path, directories, files in os.walk(_UpperCAmelCase ): for folder in directories: # Ignore private modules if folder.startswith('_' ): directories.remove(_UpperCAmelCase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_UpperCAmelCase ) / folder).glob('*.py' ) ) ) == 0: continue lowerCAmelCase = str((Path(_UpperCAmelCase ) / folder).relative_to(_UpperCAmelCase ) ) lowerCAmelCase = short_path.replace(os.path.sep , '.' ) submodules.append(_UpperCAmelCase ) for fname in files: if fname == "__init__.py": continue lowerCAmelCase = str((Path(_UpperCAmelCase ) / fname).relative_to(_UpperCAmelCase ) ) lowerCAmelCase = short_path.replace('.py' , '' ).replace(os.path.sep , '.' ) if len(submodule.split('.' 
) ) == 1: submodules.append(_UpperCAmelCase ) return submodules __UpperCamelCase : Union[str, Any] = [ '''convert_pytorch_checkpoint_to_tf2''', '''modeling_flax_pytorch_utils''', ] def _SCREAMING_SNAKE_CASE (): # This is to make sure the transformers module imported is the one in the repo. lowerCAmelCase = importlib.util.spec_from_file_location( 'transformers' , os.path.join(_UpperCAmelCase , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) lowerCAmelCase = spec.loader.load_module() lowerCAmelCase = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(_UpperCAmelCase ) > 0: lowerCAmelCase = '\n'.join(F'- {module}' for module in module_not_registered ) raise ValueError( 'The following submodules are not properly registered in the main init of Transformers:\n' F'{list_of_modules}\n' 'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' ) if __name__ == "__main__": check_all_inits() check_submodules()
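# What the backend detection above actually matches: a demo of the two
# regexes driving find_backend, copied from the module constants at the
# top of this script, applied to a representative init line.
import re

demo_re_backend = re.compile(r"is\_([a-z_]*)_available()")
demo_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")

line = "    if not is_torch_available() and not is_vision_available():"
assert demo_re_test_backend.search(line) is not None
backends = sorted(b[0] for b in demo_re_backend.findall(line))
print("_and_".join(backends))  # torch_and_vision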
"""simple docstring"""
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    'split_dict' , [
        SplitDict(),
        SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
        SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
        SplitDict({'train': SplitInfo()} ),
    ] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    'split_info' , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name='my_dataset' )] )
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({'train': split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int('1' in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
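# Worked check of binary_or above: 25 = 0b11001 and 32 = 0b100000; padding
# both to six bits and OR-ing column-wise gives 0b111001, i.e. 57 == 25 | 32.
assert binary_or(25, 32) == "0b111001"
assert binary_or(0, 0) == "0b0"
assert binary_or(25, 32) == bin(25 | 32)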
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __UpperCamelCase : Any = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] ): from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ): from diffusers.utils.testing_utils import pytest_terminal_summary_main lowerCAmelCase = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(_UpperCAmelCase , id=_UpperCAmelCase )
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class a ( unittest.TestCase ): def __init__( self , _snake_case , _snake_case=7 , _snake_case=3 , _snake_case=18 , _snake_case=30 , _snake_case=4_00 , _snake_case=True , _snake_case=None , _snake_case=True , _snake_case=None , _snake_case=True , ): """simple docstring""" lowerCAmelCase = size if size is not None else {'shortest_edge': 20} lowerCAmelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18} lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = num_channels lowerCAmelCase = image_size lowerCAmelCase = min_resolution lowerCAmelCase = max_resolution lowerCAmelCase = do_resize lowerCAmelCase = size lowerCAmelCase = do_center_crop lowerCAmelCase = crop_size lowerCAmelCase = do_flip_channel_order def UpperCamelCase__ ( self ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class a ( a__ , unittest.TestCase ): snake_case__ = MobileViTImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = MobileViTImageProcessingTester(self ) @property def UpperCamelCase__ ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_snake_case , 'do_resize' ) ) self.assertTrue(hasattr(_snake_case , 'size' ) ) self.assertTrue(hasattr(_snake_case , 'do_center_crop' ) ) self.assertTrue(hasattr(_snake_case , 'center_crop' ) ) self.assertTrue(hasattr(_snake_case , 'do_flip_channel_order' ) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 20} ) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} ) lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , Image.Image ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values self.assertEqual( 
encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , np.ndarray ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , torch.Tensor ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
309
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class a ( unittest.TestCase ): def __init__( self , _snake_case , _snake_case=7 , _snake_case=3 , _snake_case=18 , _snake_case=30 , _snake_case=4_00 , _snake_case=True , _snake_case=None , _snake_case=True , _snake_case=None , _snake_case=True , ): """simple docstring""" lowerCAmelCase = size if size is not None else {'shortest_edge': 20} lowerCAmelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18} lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = num_channels lowerCAmelCase = image_size lowerCAmelCase = min_resolution lowerCAmelCase = max_resolution lowerCAmelCase = do_resize lowerCAmelCase = size lowerCAmelCase = do_center_crop lowerCAmelCase = crop_size lowerCAmelCase = do_flip_channel_order def UpperCamelCase__ ( self ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class a ( a__ , unittest.TestCase ): snake_case__ = MobileViTImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = MobileViTImageProcessingTester(self ) @property def UpperCamelCase__ ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_snake_case , 'do_resize' ) ) self.assertTrue(hasattr(_snake_case , 'size' ) ) self.assertTrue(hasattr(_snake_case , 'do_center_crop' ) ) self.assertTrue(hasattr(_snake_case , 'center_crop' ) ) self.assertTrue(hasattr(_snake_case , 'do_flip_channel_order' ) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 20} ) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} ) lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , Image.Image ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values self.assertEqual( 
encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , np.ndarray ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case , torch.Tensor ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
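# A minimal usage sketch for the image processor exercised by the tests above.
# Purely illustrative: the checkpoint name "apple/mobilevit-small" is an
# assumption; any MobileViT checkpoint with a preprocessor config would do.
import numpy as np
from PIL import Image
from transformers import MobileViTImageProcessor

image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
image = Image.fromarray(np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8))
inputs = image_processor(image, return_tensors="pt")
print(inputs.pixel_values.shape)  # torch.Size([1, num_channels, crop_h, crop_w])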
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class a ( a__ ): snake_case__ = ['''image_processor''', '''tokenizer'''] snake_case__ = '''CLIPImageProcessor''' snake_case__ = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''') def __init__( self , _snake_case=None , _snake_case=None , **_snake_case ): """simple docstring""" lowerCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , _snake_case , ) lowerCAmelCase = kwargs.pop('feature_extractor' ) lowerCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(_snake_case , _snake_case ) def __call__( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ): """simple docstring""" if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: lowerCAmelCase = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case ) if images is not None: lowerCAmelCase = self.image_processor(_snake_case , return_tensors=_snake_case , **_snake_case ) if text is not None and images is not None: lowerCAmelCase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_snake_case ) , tensor_type=_snake_case ) def UpperCamelCase__ ( self , *_snake_case , **_snake_case ): """simple docstring""" return self.tokenizer.batch_decode(*_snake_case , **_snake_case ) def UpperCamelCase__ ( self , *_snake_case , **_snake_case ): """simple docstring""" return self.tokenizer.decode(*_snake_case , **_snake_case ) @property def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.tokenizer.model_input_names lowerCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
"""simple docstring""" import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class a ( unittest.TestCase ): def UpperCamelCase__ ( self ): """simple docstring""" debug_launcher(test_script.main ) def UpperCamelCase__ ( self ): """simple docstring""" debug_launcher(test_ops.main )
"""simple docstring""" from __future__ import annotations from decimal import Decimal from numpy import array def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[float]] ): lowerCAmelCase = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(_UpperCAmelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix lowerCAmelCase = float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError('This matrix has no inverse.' ) # Creates a copy of the matrix with swapped positions of the elements lowerCAmelCase = [[0.0, 0.0], [0.0, 0.0]] lowerCAmelCase ,lowerCAmelCase = matrix[1][1], matrix[0][0] lowerCAmelCase ,lowerCAmelCase = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(_UpperCAmelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(_UpperCAmelCase ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule lowerCAmelCase = float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError('This matrix has no inverse.' ) # Creating cofactor matrix lowerCAmelCase = [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] lowerCAmelCase = (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) lowerCAmelCase = -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )) ) lowerCAmelCase = (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) lowerCAmelCase = -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) lowerCAmelCase = -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) lowerCAmelCase = (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) lowerCAmelCase = -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) lowerCAmelCase = (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) lowerCAmelCase = array(_UpperCAmelCase ) for i in range(3 ): for j in range(3 ): lowerCAmelCase = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix lowerCAmelCase = array(_UpperCAmelCase ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(_UpperCAmelCase ) # Calculate the inverse of the matrix return [[float(d(_UpperCAmelCase ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer __UpperCamelCase : Tuple = logging.get_logger(__name__) __UpperCamelCase : List[str] = {'''vocab_file''': '''vocab.txt'''} __UpperCamelCase : Tuple = { '''vocab_file''': { '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''', '''YituTech/conv-bert-medium-small''': ( '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt''' ), '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''', } } __UpperCamelCase : Dict = { '''YituTech/conv-bert-base''': 512, '''YituTech/conv-bert-medium-small''': 512, '''YituTech/conv-bert-small''': 512, } __UpperCamelCase : List[str] = { '''YituTech/conv-bert-base''': {'''do_lower_case''': True}, '''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True}, '''YituTech/conv-bert-small''': {'''do_lower_case''': True}, } class a ( a__ ): snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_VOCAB_FILES_MAP snake_case__ = PRETRAINED_INIT_CONFIGURATION snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = ConvBertTokenizer def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ): """simple docstring""" super().__init__( _snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , ) lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _snake_case ) != do_lower_case or normalizer_state.get('strip_accents' , _snake_case ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _snake_case ) != tokenize_chinese_chars ): lowerCAmelCase = getattr(_snake_case , normalizer_state.pop('type' ) ) lowerCAmelCase = do_lower_case lowerCAmelCase = strip_accents lowerCAmelCase = tokenize_chinese_chars lowerCAmelCase = normalizer_class(**_snake_case ) lowerCAmelCase = do_lower_case def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = self._tokenizer.model.save(_snake_case , name=_snake_case ) return tuple(_snake_case )
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase : Dict = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys __UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCamelCase : str = { '''configuration_bridgetower''': [ '''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BridgeTowerConfig''', '''BridgeTowerTextConfig''', '''BridgeTowerVisionConfig''', ], '''processing_bridgetower''': ['''BridgeTowerProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[Any] = ['''BridgeTowerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = [ '''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BridgeTowerForContrastiveLearning''', '''BridgeTowerForImageAndTextRetrieval''', '''BridgeTowerForMaskedLM''', '''BridgeTowerModel''', '''BridgeTowerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys __UpperCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer __UpperCamelCase : Dict = logging.get_logger(__name__) __UpperCamelCase : str = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase : Optional[int] = { '''vocab_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''', '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-german-cased''': ( '''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json''' ), '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json''' ), }, } __UpperCamelCase : str = { '''distilbert-base-uncased''': 512, '''distilbert-base-uncased-distilled-squad''': 512, '''distilbert-base-cased''': 512, '''distilbert-base-cased-distilled-squad''': 512, '''distilbert-base-german-cased''': 512, '''distilbert-base-multilingual-cased''': 512, } __UpperCamelCase : Any = { '''distilbert-base-uncased''': {'''do_lower_case''': True}, '''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True}, '''distilbert-base-cased''': {'''do_lower_case''': False}, '''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False}, '''distilbert-base-german-cased''': {'''do_lower_case''': False}, '''distilbert-base-multilingual-cased''': {'''do_lower_case''': False}, } class a ( a__ ): snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_VOCAB_FILES_MAP snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = PRETRAINED_INIT_CONFIGURATION snake_case__ = ['''input_ids''', '''attention_mask'''] snake_case__ = DistilBertTokenizer def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ): """simple docstring""" super().__init__( _snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , 
tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , ) lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _snake_case ) != do_lower_case or normalizer_state.get('strip_accents' , _snake_case ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _snake_case ) != tokenize_chinese_chars ): lowerCAmelCase = getattr(_snake_case , normalizer_state.pop('type' ) ) lowerCAmelCase = do_lower_case lowerCAmelCase = strip_accents lowerCAmelCase = tokenize_chinese_chars lowerCAmelCase = normalizer_class(**_snake_case ) lowerCAmelCase = do_lower_case def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = self._tokenizer.model.save(_snake_case , name=_snake_case ) return tuple(_snake_case )
"""simple docstring""" from collections.abc import Sequence from queue import Queue class a : def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case=None , _snake_case=None ): """simple docstring""" lowerCAmelCase = start lowerCAmelCase = end lowerCAmelCase = val lowerCAmelCase = (start + end) // 2 lowerCAmelCase = left lowerCAmelCase = right def __repr__( self ): """simple docstring""" return F'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})' class a : def __init__( self , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = collection lowerCAmelCase = function if self.collection: lowerCAmelCase = self._build_tree(0 , len(_snake_case ) - 1 ) def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" self._update_tree(self.root , _snake_case , _snake_case ) def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" return self._query_range(self.root , _snake_case , _snake_case ) def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" if start == end: return SegmentTreeNode(_snake_case , _snake_case , self.collection[start] ) lowerCAmelCase = (start + end) // 2 lowerCAmelCase = self._build_tree(_snake_case , _snake_case ) lowerCAmelCase = self._build_tree(mid + 1 , _snake_case ) return SegmentTreeNode(_snake_case , _snake_case , self.fn(left.val , right.val ) , _snake_case , _snake_case ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case ): """simple docstring""" if node.start == i and node.end == i: lowerCAmelCase = val return if i <= node.mid: self._update_tree(node.left , _snake_case , _snake_case ) else: self._update_tree(node.right , _snake_case , _snake_case ) lowerCAmelCase = self.fn(node.left.val , node.right.val ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case ): """simple docstring""" if node.start == i and node.end == j: return node.val if i <= node.mid: if j <= node.mid: # range in left child tree return self._query_range(node.left , _snake_case , _snake_case ) else: # range in left child tree and right child tree return self.fn( self._query_range(node.left , _snake_case , node.mid ) , self._query_range(node.right , node.mid + 1 , _snake_case ) , ) else: # range in right child tree return self._query_range(node.right , _snake_case , _snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" if self.root is not None: lowerCAmelCase = Queue() queue.put(self.root ) while not queue.empty(): lowerCAmelCase = queue.get() yield node if node.left is not None: queue.put(node.left ) if node.right is not None: queue.put(node.right ) if __name__ == "__main__": import operator for fn in [operator.add, max, min]: print('''*''' * 50) __UpperCamelCase : Tuple = SegmentTree([2, 1, 5, 3, 4], fn) for node in arr.traverse(): print(node) print() arr.update(1, 5) for node in arr.traverse(): print(node) print() print(arr.query_range(3, 4)) # 7 print(arr.query_range(2, 2)) # 5 print(arr.query_range(1, 3)) # 13 print()
"""simple docstring""" from __future__ import annotations def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] | None = None ): lowerCAmelCase = word_bank or [] # create a table lowerCAmelCase = len(_UpperCAmelCase ) + 1 lowerCAmelCase = [] for _ in range(_UpperCAmelCase ): table.append([] ) # seed value lowerCAmelCase = [[]] # because empty string has empty combination # iterate through the indices for i in range(_UpperCAmelCase ): # condition if table[i] != []: for word in word_bank: # slice condition if target[i : i + len(_UpperCAmelCase )] == word: lowerCAmelCase = [ [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now,push that combination to the table[i+len(word)] table[i + len(_UpperCAmelCase )] += new_combinations # combinations are in reverse order so reverse for better output for combination in table[len(_UpperCAmelCase )]: combination.reverse() return table[len(_UpperCAmelCase )] if __name__ == "__main__": print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa'''])) print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t'''])) print( all_construct( '''hexagonosaurus''', ['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''], ) )
"""simple docstring""" import math def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ): lowerCAmelCase = [] lowerCAmelCase = 2 lowerCAmelCase = int(math.sqrt(_UpperCAmelCase ) ) # Size of every segment lowerCAmelCase = [True] * (end + 1) lowerCAmelCase = [] while start <= end: if temp[start] is True: in_prime.append(_UpperCAmelCase ) for i in range(start * start , end + 1 , _UpperCAmelCase ): lowerCAmelCase = False start += 1 prime += in_prime lowerCAmelCase = end + 1 lowerCAmelCase = min(2 * end , _UpperCAmelCase ) while low <= n: lowerCAmelCase = [True] * (high - low + 1) for each in in_prime: lowerCAmelCase = math.floor(low / each ) * each if t < low: t += each for j in range(_UpperCAmelCase , high + 1 , _UpperCAmelCase ): lowerCAmelCase = False for j in range(len(_UpperCAmelCase ) ): if temp[j] is True: prime.append(j + low ) lowerCAmelCase = high + 1 lowerCAmelCase = min(high + end , _UpperCAmelCase ) return prime print(sieve(10**6))
"""simple docstring""" import re def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): if len(re.findall('[ATCG]' , _UpperCAmelCase ) ) != len(_UpperCAmelCase ): raise ValueError('Invalid Strand' ) return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable __UpperCamelCase : Any = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[Any] = ['''DPTFeatureExtractor'''] __UpperCamelCase : List[Any] = ['''DPTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ '''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DPTForDepthEstimation''', '''DPTForSemanticSegmentation''', '''DPTModel''', '''DPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () __UpperCamelCase : List[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). __UpperCamelCase : str = [0, 25, 50] __UpperCamelCase : int = [25, 50, 75] __UpperCamelCase : str = fuzz.membership.trimf(X, abca) __UpperCamelCase : Tuple = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. __UpperCamelCase : Dict = np.ones(75) __UpperCamelCase : str = np.zeros((75,)) # 1. Union = max(µA(x), µB(x)) __UpperCamelCase : Optional[Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) __UpperCamelCase : Dict = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) __UpperCamelCase : Dict = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) __UpperCamelCase : List[str] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] __UpperCamelCase : List[str] = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) __UpperCamelCase : Tuple = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] __UpperCamelCase : Union[str, Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] __UpperCamelCase : Dict = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title('''Young''') plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title('''Middle aged''') plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title('''union''') plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title('''intersection''') plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title('''complement_a''') plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title('''difference a/b''') plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title('''alg_sum''') plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title('''alg_product''') plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title('''bdd_sum''') plt.grid(True) plt.subplot(4, 3, 10) plt.plot(X, bdd_difference) plt.title('''bdd_difference''') plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
"""simple docstring""" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 1000 ): return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) ) if __name__ == "__main__": print(solution())
"""simple docstring""" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[int] , _UpperCAmelCase : str ): lowerCAmelCase = int(_UpperCAmelCase ) # Initialize Result lowerCAmelCase = [] # Traverse through all denomination for denomination in reversed(_UpperCAmelCase ): # Find denominations while int(_UpperCAmelCase ) >= int(_UpperCAmelCase ): total_value -= int(_UpperCAmelCase ) answer.append(_UpperCAmelCase ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": __UpperCamelCase : Any = [] __UpperCamelCase : List[Any] = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): __UpperCamelCase : Any = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(f'''Denomination {i}: ''').strip())) __UpperCamelCase : int = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter __UpperCamelCase : List[str] = [1, 2, 5, 10, 20, 50, 100, 500, 2000] __UpperCamelCase : Any = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(f'''Following is minimal change for {value}: ''') __UpperCamelCase : List[str] = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
"""simple docstring""" from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : complex , _UpperCAmelCase : str = "x" , _UpperCAmelCase : float = 10**-10 , _UpperCAmelCase : int = 1 , ): lowerCAmelCase = symbols(_UpperCAmelCase ) lowerCAmelCase = lambdify(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = lambdify(_UpperCAmelCase , diff(_UpperCAmelCase , _UpperCAmelCase ) ) lowerCAmelCase = starting_point while True: if diff_function(_UpperCAmelCase ) != 0: lowerCAmelCase = prev_guess - multiplicity * func(_UpperCAmelCase ) / diff_function( _UpperCAmelCase ) else: raise ZeroDivisionError('Could not find root' ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess lowerCAmelCase = next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''') # Find root of polynomial # Find fourth Root of 5 print(f'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}''') # Find value of e print( '''The root of log(y) - 1 = 0 is ''', f'''{newton_raphson('log(y) - 1', 2, variable='y')}''', ) # Exponential Roots print( '''The root of exp(x) - 1 = 0 is''', f'''{newton_raphson('exp(x) - 1', 10, precision=0.0_05)}''', ) # Find root of cos(x) print(f'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
"""simple docstring""" from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class a : def __init__( self , _snake_case , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = 13 lowerCAmelCase = 7 lowerCAmelCase = True lowerCAmelCase = True lowerCAmelCase = True lowerCAmelCase = 99 lowerCAmelCase = 32 lowerCAmelCase = 2 lowerCAmelCase = 4 lowerCAmelCase = 37 lowerCAmelCase = 'gelu' lowerCAmelCase = 0.1 lowerCAmelCase = 0.1 lowerCAmelCase = 5_12 lowerCAmelCase = 16 lowerCAmelCase = 2 lowerCAmelCase = 0.02 lowerCAmelCase = 3 lowerCAmelCase = 4 lowerCAmelCase = None def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self ): """simple docstring""" ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = self.prepare_config_and_inputs() lowerCAmelCase = True lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFEsmModel(config=_snake_case ) lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} lowerCAmelCase = model(_snake_case ) lowerCAmelCase = [input_ids, input_mask] lowerCAmelCase = model(_snake_case ) lowerCAmelCase = model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" 
lowerCAmelCase = True lowerCAmelCase = TFEsmModel(config=_snake_case ) lowerCAmelCase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'encoder_hidden_states': encoder_hidden_states, 'encoder_attention_mask': encoder_attention_mask, } lowerCAmelCase = model(_snake_case ) lowerCAmelCase = [input_ids, input_mask] lowerCAmelCase = model(_snake_case , encoder_hidden_states=_snake_case ) # Also check the case where encoder outputs are not passed lowerCAmelCase = model(_snake_case , attention_mask=_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = TFEsmForMaskedLM(config=_snake_case ) lowerCAmelCase = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = self.num_labels lowerCAmelCase = TFEsmForTokenClassification(config=_snake_case ) lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} lowerCAmelCase = model(_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class a ( a__ , a__ , unittest.TestCase ): snake_case__ = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) snake_case__ = ( { '''feature-extraction''': TFEsmModel, '''fill-mask''': TFEsmForMaskedLM, '''text-classification''': TFEsmForSequenceClassification, '''token-classification''': TFEsmForTokenClassification, '''zero-shot''': TFEsmForSequenceClassification, } if is_tf_available() else {} ) snake_case__ = False snake_case__ = False def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFEsmModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_snake_case ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = TFEsmModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) @unittest.skip('Protein models do 
not support embedding resizing.' ) def UpperCamelCase__ ( self ): """simple docstring""" pass @unittest.skip('Protein models do not support embedding resizing.' ) def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase = model_class(_snake_case ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer lowerCAmelCase = model.get_bias() assert isinstance(_snake_case , _snake_case ) for k, v in name.items(): assert isinstance(_snake_case , tf.Variable ) else: lowerCAmelCase = model.get_output_embeddings() assert x is None lowerCAmelCase = model.get_bias() assert name is None @require_tf class a ( unittest.TestCase ): @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCAmelCase = model(_snake_case )[0] lowerCAmelCase = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) , _snake_case ) # compare the actual values for a slice. lowerCAmelCase = tf.constant( [ [ [8.921_518, -10.589_814, -6.4_671_307], [-6.3_967_156, -13.911_377, -1.1_211_915], [-7.781_247, -13.951_557, -3.740_592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) ) @slow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) lowerCAmelCase = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowerCAmelCase = model(_snake_case )[0] # compare the actual values for a slice. lowerCAmelCase = tf.constant( [ [ [0.14_443_092, 0.54_125_327, 0.3_247_739], [0.30_340_484, 0.00_526_676, 0.31_077_722], [0.32_278_043, -0.24_987_096, 0.3_414_628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __UpperCamelCase : Dict = { '''configuration_groupvit''': [ '''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GroupViTConfig''', '''GroupViTOnnxConfig''', '''GroupViTTextConfig''', '''GroupViTVisionConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : List[Any] = [ '''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GroupViTModel''', '''GroupViTPreTrainedModel''', '''GroupViTTextModel''', '''GroupViTVisionModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[int] = [ '''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFGroupViTModel''', '''TFGroupViTPreTrainedModel''', '''TFGroupViTTextModel''', '''TFGroupViTVisionModel''', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys __UpperCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) __UpperCamelCase : Dict = '''▁''' __UpperCamelCase : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''} __UpperCamelCase : str = { '''sentencepiece_model_file''': '''sentencepiece.bpe.model''', '''vocab_file''': '''vocab.txt''', } __UpperCamelCase : Tuple = { '''vocab_file''': { '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''', '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''', }, '''sentencepiece_model_file''': { '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''', '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''', }, } __UpperCamelCase : Optional[Any] = { '''ernie-m-base''': 514, '''ernie-m-large''': 514, } __UpperCamelCase : str = { '''ernie-m-base''': {'''do_lower_case''': False}, '''ernie-m-large''': {'''do_lower_case''': False}, } class a ( a__ ): snake_case__ = ["input_ids"] snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_INIT_CONFIGURATION snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = PRETRAINED_VOCAB_FILES_MAP snake_case__ = RESOURCE_FILES_NAMES def __init__( self , _snake_case , _snake_case=None , _snake_case=False , _snake_case="utf8" , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case = None , **_snake_case , ): """simple docstring""" lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , vocab_file=_snake_case , encoding=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , ) lowerCAmelCase = do_lower_case lowerCAmelCase = sentencepiece_model_ckpt lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_snake_case ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: lowerCAmelCase = self.load_vocab(filepath=_snake_case ) else: lowerCAmelCase = {self.sp_model.id_to_piece(_snake_case ): id for id in range(self.sp_model.get_piece_size() )} lowerCAmelCase = {v: k for k, v in self.vocab.items()} def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if text is None: return None lowerCAmelCase = self.tokenize(_snake_case ) lowerCAmelCase ,lowerCAmelCase = '', [] for i, ch in enumerate(_snake_case ): if ch in self.SP_CHAR_MAPPING: lowerCAmelCase = self.SP_CHAR_MAPPING.get(_snake_case ) else: lowerCAmelCase = unicodedata.normalize('NFKC' , _snake_case ) if self.is_whitespace(_snake_case ): continue normalized_text += ch char_mapping.extend([i] * len(_snake_case ) ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = normalized_text, [], 0 if self.do_lower_case: lowerCAmelCase = text.lower() for token in split_tokens: if token[:1] == "▁": lowerCAmelCase = token[1:] lowerCAmelCase = text[offset:].index(_snake_case ) + offset lowerCAmelCase = start + len(_snake_case ) token_mapping.append((char_mapping[start], char_mapping[end - 
1] + 1) ) lowerCAmelCase = end return token_mapping @property def UpperCamelCase__ ( self ): """simple docstring""" return len(self.vocab ) def UpperCamelCase__ ( self ): """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self ): """simple docstring""" lowerCAmelCase = self.__dict__.copy() lowerCAmelCase = None return state def __setstate__( self , _snake_case ): """simple docstring""" lowerCAmelCase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): lowerCAmelCase = {} lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return "".join((self.SP_CHAR_MAPPING.get(_snake_case , _snake_case ) for c in text) ) def UpperCamelCase__ ( self , _snake_case , _snake_case=False , _snake_case=64 , _snake_case=0.1 ): """simple docstring""" if self.sp_model_kwargs.get('enable_sampling' ) is True: lowerCAmelCase = True if self.sp_model_kwargs.get('alpha' ) is not None: lowerCAmelCase = self.sp_model_kwargs.get('alpha' ) if self.sp_model_kwargs.get('nbest_size' ) is not None: lowerCAmelCase = self.sp_model_kwargs.get('nbest_size' ) if not enable_sampling: lowerCAmelCase = self.sp_model.EncodeAsPieces(_snake_case ) else: lowerCAmelCase = self.sp_model.SampleEncodeAsPieces(_snake_case , _snake_case , _snake_case ) lowerCAmelCase = [] for pi, piece in enumerate(_snake_case ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(_snake_case ) and pi != 0: new_pieces.append(_snake_case ) continue else: continue lowerCAmelCase = 0 for i, chunk in enumerate(_snake_case ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(_snake_case ) or self.is_punct(_snake_case ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(_snake_case ) lowerCAmelCase = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowerCAmelCase = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowerCAmelCase = i if len(_snake_case ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip() return out_string def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = self.convert_ids_to_tokens(_snake_case ) lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip() return out_string def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" return self.reverse_vocab.get(_snake_case , self.unk_token ) def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] lowerCAmelCase = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + 
[(0, 0)] def UpperCamelCase__ ( self , _snake_case , _snake_case=None , _snake_case=False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1] return [1] + ([0] * len(_snake_case )) + [1] def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" if token_ids_a is None: # [CLS] X [SEP] return (len(_snake_case ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(_snake_case ) + 1) + [1] * (len(_snake_case ) + 3) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if "\u4e00" <= char <= "\u9fff": return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if char in ",;:.?!~,;:。?!《》【】": return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(_snake_case ) == 1: lowerCAmelCase = unicodedata.category(_snake_case ) if cat == "Zs": return True return False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = {} with io.open(_snake_case , 'r' , encoding='utf-8' ) as f: for index, line in enumerate(_snake_case ): lowerCAmelCase = line.rstrip('\n' ) lowerCAmelCase = int(_snake_case ) return token_to_idx def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = 0 if os.path.isdir(_snake_case ): lowerCAmelCase = os.path.join( _snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) else: lowerCAmelCase = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(_snake_case , 'w' , encoding='utf-8' ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda _snake_case : kv[1] ): if index != token_index: logger.warning( F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.' ' Please check that the vocabulary is not corrupted!' ) lowerCAmelCase = token_index writer.write(token + '\n' ) index += 1 lowerCAmelCase = os.path.join(_snake_case , 'sentencepiece.bpe.model' ) with open(_snake_case , 'wb' ) as fi: lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(_snake_case ) return (vocab_file,)
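# Usage sketch for the ERNIE-M tokenizer above. The checkpoint name mirrors
# the "susnato/ernie-m-base_pytorch" repo referenced by this file's pretrained
# maps; treat it as an assumption about where the tokenizer files live.
from transformers import ErnieMTokenizer

tokenizer = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
print(tokenizer("Hello world")["input_ids"])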
"""simple docstring""" # Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position __UpperCamelCase : int = '''2.13.1''' import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse('''3.7'''): raise ImportWarning( '''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.''' ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( '''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n''' '''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.''' ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip __UpperCamelCase : Dict = concatenate_datasets __UpperCamelCase : Optional[int] = DownloadConfig __UpperCamelCase : List[Any] = DownloadManager __UpperCamelCase : List[str] = DownloadMode __UpperCamelCase : Optional[Any] = DownloadConfig __UpperCamelCase : Any = DownloadMode __UpperCamelCase : Optional[Any] = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
309
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME __UpperCamelCase : int = ['''small''', '''medium''', '''large'''] __UpperCamelCase : str = '''lm_head.decoder.weight''' __UpperCamelCase : Dict = '''lm_head.weight''' def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ): lowerCAmelCase = torch.load(_UpperCAmelCase ) lowerCAmelCase = d.pop(_UpperCAmelCase ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) torch.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ) if __name__ == "__main__": __UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--dialogpt_path''', default='''.''', type=str) __UpperCamelCase : Optional[int] = parser.parse_args() for MODEL in DIALOGPT_MODELS: __UpperCamelCase : Dict = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''') __UpperCamelCase : str = f'''./DialoGPT-{MODEL}''' convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
309
1
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig __UpperCamelCase : Dict = logging.get_logger(__name__) # General docstring __UpperCamelCase : str = '''MobileNetV1Config''' # Base docstring __UpperCamelCase : List[str] = '''google/mobilenet_v1_1.0_224''' __UpperCamelCase : Any = [1, 1024, 7, 7] # Image classification docstring __UpperCamelCase : Optional[Any] = '''google/mobilenet_v1_1.0_224''' __UpperCamelCase : int = '''tabby, tabby cat''' __UpperCamelCase : Any = [ '''google/mobilenet_v1_1.0_224''', '''google/mobilenet_v1_0.75_192''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[str]=None ): lowerCAmelCase = {} if isinstance(_UpperCAmelCase , _UpperCAmelCase ): lowerCAmelCase = model.mobilenet_va else: lowerCAmelCase = model lowerCAmelCase = 'MobilenetV1/Conv2d_0/' lowerCAmelCase = backbone.conv_stem.convolution.weight lowerCAmelCase = backbone.conv_stem.normalization.bias lowerCAmelCase = backbone.conv_stem.normalization.weight lowerCAmelCase = backbone.conv_stem.normalization.running_mean lowerCAmelCase = backbone.conv_stem.normalization.running_var for i in range(13 ): lowerCAmelCase = i + 1 lowerCAmelCase = i * 2 lowerCAmelCase = backbone.layer[pt_index] lowerCAmelCase = F'MobilenetV1/Conv2d_{tf_index}_depthwise/' lowerCAmelCase = pointer.convolution.weight lowerCAmelCase = pointer.normalization.bias lowerCAmelCase = pointer.normalization.weight lowerCAmelCase = pointer.normalization.running_mean lowerCAmelCase = pointer.normalization.running_var lowerCAmelCase = backbone.layer[pt_index + 1] lowerCAmelCase = F'MobilenetV1/Conv2d_{tf_index}_pointwise/' lowerCAmelCase = pointer.convolution.weight lowerCAmelCase = pointer.normalization.bias lowerCAmelCase = pointer.normalization.weight lowerCAmelCase = pointer.normalization.running_mean lowerCAmelCase = pointer.normalization.running_var if isinstance(_UpperCAmelCase , _UpperCAmelCase ): lowerCAmelCase = 'MobilenetV1/Logits/Conv2d_1c_1x1/' lowerCAmelCase = model.classifier.weight lowerCAmelCase = model.classifier.bias return tf_to_pt_map def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ): try: import numpy as np import tensorflow as tf except ImportError: logger.error( 'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see ' 'https://www.tensorflow.org/install/ for installation instructions.' 
) raise # Load weights from TF model lowerCAmelCase = tf.train.list_variables(_UpperCAmelCase ) lowerCAmelCase = {} for name, shape in init_vars: logger.info(F'Loading TF weight {name} with shape {shape}' ) lowerCAmelCase = tf.train.load_variable(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = array # Build TF to PyTorch weights loading map lowerCAmelCase = _build_tf_to_pytorch_map(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for name, pointer in tf_to_pt_map.items(): logger.info(F'Importing {name}' ) if name not in tf_weights: logger.info(F'{name} not in tf pre-trained weights, skipping' ) continue lowerCAmelCase = tf_weights[name] if "depthwise_weights" in name: logger.info('Transposing depthwise' ) lowerCAmelCase = np.transpose(_UpperCAmelCase , (2, 3, 0, 1) ) elif "weights" in name: logger.info('Transposing' ) if len(pointer.shape ) == 2: # copying into linear layer lowerCAmelCase = array.squeeze().transpose() else: lowerCAmelCase = np.transpose(_UpperCAmelCase , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F'Pointer shape {pointer.shape} and array shape {array.shape} mismatched' ) logger.info(F'Initialize PyTorch weight {name} {array.shape}' ) lowerCAmelCase = torch.from_numpy(_UpperCAmelCase ) tf_weights.pop(_UpperCAmelCase , _UpperCAmelCase ) tf_weights.pop(name + '/RMSProp' , _UpperCAmelCase ) tf_weights.pop(name + '/RMSProp_1' , _UpperCAmelCase ) tf_weights.pop(name + '/ExponentialMovingAverage' , _UpperCAmelCase ) logger.info(F'Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}' ) return model def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : torch.Tensor , _UpperCAmelCase : nn.Convad ): lowerCAmelCase ,lowerCAmelCase = features.shape[-2:] lowerCAmelCase ,lowerCAmelCase = conv_layer.stride lowerCAmelCase ,lowerCAmelCase = conv_layer.kernel_size if in_height % stride_height == 0: lowerCAmelCase = max(kernel_height - stride_height , 0 ) else: lowerCAmelCase = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: lowerCAmelCase = max(kernel_width - stride_width , 0 ) else: lowerCAmelCase = max(kernel_width - (in_width % stride_width) , 0 ) lowerCAmelCase = pad_along_width // 2 lowerCAmelCase = pad_along_width - pad_left lowerCAmelCase = pad_along_height // 2 lowerCAmelCase = pad_along_height - pad_top lowerCAmelCase = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(_UpperCAmelCase , _UpperCAmelCase , 'constant' , 0.0 ) class a ( nn.Module ): def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = 1 , _snake_case = 1 , _snake_case = False , _snake_case = True , _snake_case = True , ): """simple docstring""" super().__init__() lowerCAmelCase = config if in_channels % groups != 0: raise ValueError(F'Input channels ({in_channels}) are not divisible by {groups} groups.' ) if out_channels % groups != 0: raise ValueError(F'Output channels ({out_channels}) are not divisible by {groups} groups.' 
) lowerCAmelCase = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) lowerCAmelCase = nn.Convad( in_channels=_snake_case , out_channels=_snake_case , kernel_size=_snake_case , stride=_snake_case , padding=_snake_case , groups=_snake_case , bias=_snake_case , padding_mode='zeros' , ) if use_normalization: lowerCAmelCase = nn.BatchNormad( num_features=_snake_case , eps=config.layer_norm_eps , momentum=0.9_997 , affine=_snake_case , track_running_stats=_snake_case , ) else: lowerCAmelCase = None if use_activation: if isinstance(_snake_case , _snake_case ): lowerCAmelCase = ACTaFN[use_activation] elif isinstance(config.hidden_act , _snake_case ): lowerCAmelCase = ACTaFN[config.hidden_act] else: lowerCAmelCase = config.hidden_act else: lowerCAmelCase = None def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if self.config.tf_padding: lowerCAmelCase = apply_tf_padding(_snake_case , self.convolution ) lowerCAmelCase = self.convolution(_snake_case ) if self.normalization is not None: lowerCAmelCase = self.normalization(_snake_case ) if self.activation is not None: lowerCAmelCase = self.activation(_snake_case ) return features class a ( a__ ): snake_case__ = MobileNetVaConfig snake_case__ = load_tf_weights_in_mobilenet_va snake_case__ = '''mobilenet_v1''' snake_case__ = '''pixel_values''' snake_case__ = False def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" if isinstance(_snake_case , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(_snake_case , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) __UpperCamelCase : Dict = R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' __UpperCamelCase : Any = R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileNetV1ImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
''' @add_start_docstrings( '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , a__ , ) class a ( a__ ): def __init__( self , _snake_case , _snake_case = True ): """simple docstring""" super().__init__(_snake_case ) lowerCAmelCase = config lowerCAmelCase = 32 lowerCAmelCase = max(int(depth * config.depth_multiplier ) , config.min_depth ) lowerCAmelCase = MobileNetVaConvLayer( _snake_case , in_channels=config.num_channels , out_channels=_snake_case , kernel_size=3 , stride=2 , ) lowerCAmelCase = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] lowerCAmelCase = nn.ModuleList() for i in range(13 ): lowerCAmelCase = out_channels if strides[i] == 2 or i == 0: depth *= 2 lowerCAmelCase = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( _snake_case , in_channels=_snake_case , out_channels=_snake_case , kernel_size=3 , stride=strides[i] , groups=_snake_case , ) ) self.layer.append( MobileNetVaConvLayer( _snake_case , in_channels=_snake_case , out_channels=_snake_case , kernel_size=1 , ) ) lowerCAmelCase = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" raise NotImplementedError @add_start_docstrings_to_model_forward(_snake_case ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_snake_case , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCamelCase__ ( self , _snake_case = None , _snake_case = None , _snake_case = None , ): """simple docstring""" lowerCAmelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values' ) lowerCAmelCase = self.conv_stem(_snake_case ) lowerCAmelCase = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): lowerCAmelCase = layer_module(_snake_case ) if output_hidden_states: lowerCAmelCase = all_hidden_states + (hidden_states,) lowerCAmelCase = hidden_states if self.pooler is not None: lowerCAmelCase = torch.flatten(self.pooler(_snake_case ) , start_dim=1 ) else: lowerCAmelCase = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_snake_case , pooler_output=_snake_case , hidden_states=_snake_case , ) @add_start_docstrings( ''' MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
''' , a__ , ) class a ( a__ ): def __init__( self , _snake_case ): """simple docstring""" super().__init__(_snake_case ) lowerCAmelCase = config.num_labels lowerCAmelCase = MobileNetVaModel(_snake_case ) lowerCAmelCase = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head lowerCAmelCase = nn.Dropout(config.classifier_dropout_prob , inplace=_snake_case ) lowerCAmelCase = nn.Linear(_snake_case , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_snake_case ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCamelCase__ ( self , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , ): """simple docstring""" lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase = self.mobilenet_va(_snake_case , output_hidden_states=_snake_case , return_dict=_snake_case ) lowerCAmelCase = outputs.pooler_output if return_dict else outputs[1] lowerCAmelCase = self.classifier(self.dropout(_snake_case ) ) lowerCAmelCase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowerCAmelCase = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowerCAmelCase = 'single_label_classification' else: lowerCAmelCase = 'multi_label_classification' if self.config.problem_type == "regression": lowerCAmelCase = MSELoss() if self.num_labels == 1: lowerCAmelCase = loss_fct(logits.squeeze() , labels.squeeze() ) else: lowerCAmelCase = loss_fct(_snake_case , _snake_case ) elif self.config.problem_type == "single_label_classification": lowerCAmelCase = CrossEntropyLoss() lowerCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowerCAmelCase = BCEWithLogitsLoss() lowerCAmelCase = loss_fct(_snake_case , _snake_case ) if not return_dict: lowerCAmelCase = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=_snake_case , logits=_snake_case , hidden_states=outputs.hidden_states , )
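The apply_tf_padding helper above implements TensorFlow's "SAME" rule; a pure-Python sketch of the per-dimension arithmetic, with two worked inputs:

# TF "SAME" padding per dimension, as computed in apply_tf_padding above.
def same_pad(in_size: int, stride: int, kernel: int) -> tuple[int, int]:
    if in_size % stride == 0:
        pad = max(kernel - stride, 0)
    else:
        pad = max(kernel - (in_size % stride), 0)
    return pad // 2, pad - pad // 2  # (before, after): extra pixel goes after

print(same_pad(7, 2, 3))    # (1, 1): output size ceil(7/2) = 4
print(same_pad(224, 2, 3))  # (0, 1): even input pads asymmetrically, output 112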
309
"""simple docstring""" __UpperCamelCase : Dict = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} __UpperCamelCase : str = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ): lowerCAmelCase = True lowerCAmelCase = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) order.append(_UpperCAmelCase ) return order def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ): lowerCAmelCase = True lowerCAmelCase = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return component def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] ): lowerCAmelCase = len(_UpperCAmelCase ) * [False] lowerCAmelCase = {vert: [] for vert in range(len(_UpperCAmelCase ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(_UpperCAmelCase ) lowerCAmelCase = [] for i, was_visited in enumerate(_UpperCAmelCase ): if not was_visited: order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = [] lowerCAmelCase = len(_UpperCAmelCase ) * [False] for i in range(len(_UpperCAmelCase ) ): lowerCAmelCase = order[len(_UpperCAmelCase ) - i - 1] if not visited[vert]: lowerCAmelCase = find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) components_list.append(_UpperCAmelCase ) return components_list
309
1
"""simple docstring""" import baseaa import io import json import os from copy import deepcopy from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler class a : def __init__( self , _snake_case ): """simple docstring""" if isinstance(_snake_case , _snake_case ): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden lowerCAmelCase = deepcopy(_snake_case ) elif os.path.exists(_snake_case ): with io.open(_snake_case , 'r' , encoding='utf-8' ) as f: lowerCAmelCase = json.load(_snake_case ) else: try: lowerCAmelCase = baseaa.urlsafe_baadecode(_snake_case ).decode('utf-8' ) lowerCAmelCase = json.loads(_snake_case ) except (UnicodeDecodeError, AttributeError, ValueError): raise ValueError( F'Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}' ) lowerCAmelCase = config self.set_stage_and_offload() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.get_value('zero_optimization.stage' , -1 ) # offload lowerCAmelCase = False if self.is_zeroa() or self.is_zeroa(): lowerCAmelCase = set(['cpu', 'nvme'] ) lowerCAmelCase = set( [ self.get_value('zero_optimization.offload_optimizer.device' ), self.get_value('zero_optimization.offload_param.device' ), ] ) if len(offload_devices & offload_devices_valid ) > 0: lowerCAmelCase = True def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = self.config # find the config node of interest if it exists lowerCAmelCase = ds_key_long.split('.' ) lowerCAmelCase = nodes.pop() for node in nodes: lowerCAmelCase = config.get(_snake_case ) if config is None: return None, ds_key return config, ds_key def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.find_config_node(_snake_case ) if config is None: return default return config.get(_snake_case , _snake_case ) def UpperCamelCase__ ( self , _snake_case , _snake_case=False ): """simple docstring""" lowerCAmelCase = self.config # find the config node of interest if it exists lowerCAmelCase = ds_key_long.split('.' 
) for node in nodes: lowerCAmelCase = config lowerCAmelCase = config.get(_snake_case ) if config is None: if must_exist: raise ValueError(F'Can\'t find {ds_key_long} entry in the config: {self.config}' ) else: return # if found remove it if parent_config is not None: parent_config.pop(_snake_case ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = self.get_value(_snake_case ) return False if value is None else bool(_snake_case ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = self.get_value(_snake_case ) return False if value is None else not bool(_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" return self._stage == 2 def UpperCamelCase__ ( self ): """simple docstring""" return self._stage == 3 def UpperCamelCase__ ( self ): """simple docstring""" return self._offload class a : def __init__( self , _snake_case ): """simple docstring""" lowerCAmelCase = engine def UpperCamelCase__ ( self , _snake_case , **_snake_case ): """simple docstring""" self.engine.backward(_snake_case , **_snake_case ) # Deepspeed's `engine.step` performs the following operations: # - gradient accumulation check # - gradient clipping # - optimizer step # - zero grad # - checking overflow # - lr_scheduler step (only if engine.lr_scheduler is not None) self.engine.step() # and this plugin overrides the above calls with no-ops when Accelerate runs under # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple # training loop that works transparently under many training regimes. class a ( a__ ): def __init__( self , _snake_case ): """simple docstring""" super().__init__(_snake_case , device_placement=_snake_case , scaler=_snake_case ) lowerCAmelCase = hasattr(self.optimizer , 'overflow' ) def UpperCamelCase__ ( self , _snake_case=None ): """simple docstring""" pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed def UpperCamelCase__ ( self ): """simple docstring""" pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed @property def UpperCamelCase__ ( self ): """simple docstring""" if self.__has_overflow__: return self.optimizer.overflow return False class a ( a__ ): def __init__( self , _snake_case , _snake_case ): """simple docstring""" super().__init__(_snake_case , _snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed class a : def __init__( self , _snake_case , _snake_case=0.001 , _snake_case=0 , **_snake_case ): """simple docstring""" lowerCAmelCase = params lowerCAmelCase = lr lowerCAmelCase = weight_decay lowerCAmelCase = kwargs class a : def __init__( self , _snake_case , _snake_case=None , _snake_case=0 , **_snake_case ): """simple docstring""" lowerCAmelCase = optimizer lowerCAmelCase = total_num_steps lowerCAmelCase = warmup_num_steps lowerCAmelCase = kwargs
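The find_config_node/get_value pair above walks a nested dict by a dotted key; a standalone sketch, assuming plain nested dicts as in a DeepSpeed-style JSON config:

# Dotted-key lookup: split, walk the parents, read the leaf.
def get_value(config: dict, ds_key_long: str, default=None):
    nodes = ds_key_long.split(".")
    key = nodes.pop()
    for node in nodes:
        config = config.get(node)
        if config is None:  # missing intermediate node
            return default
    return config.get(key, default)

cfg = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
print(get_value(cfg, "zero_optimization.stage"))                 # 3
print(get_value(cfg, "zero_optimization.offload_param.device"))  # cpu
print(get_value(cfg, "zero_optimization.missing", "n/a"))        # n/a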
309
"""simple docstring""" import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) @dataclass class a : snake_case__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} ) snake_case__ = field( metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} ) snake_case__ = field( default=1_2_8 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) snake_case__ = field( default=a__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.task_name.lower() class a ( a__ ): snake_case__ = '''train''' snake_case__ = '''dev''' snake_case__ = '''test''' class a ( a__ ): snake_case__ = 42 snake_case__ = 42 snake_case__ = 42 def __init__( self , _snake_case , _snake_case , _snake_case = None , _snake_case = Split.train , _snake_case = None , ): """simple docstring""" warnings.warn( 'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets ' 'library. You can have a look at this example script for pointers: ' 'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , _snake_case , ) lowerCAmelCase = args lowerCAmelCase = glue_processors[args.task_name]() lowerCAmelCase = glue_output_modes[args.task_name] if isinstance(_snake_case , _snake_case ): try: lowerCAmelCase = Split[mode] except KeyError: raise KeyError('mode is not a valid split name' ) # Load data features from cache or dataset file lowerCAmelCase = os.path.join( cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , ) lowerCAmelCase = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) lowerCAmelCase ,lowerCAmelCase = label_list[2], label_list[1] lowerCAmelCase = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
lowerCAmelCase = cached_features_file + '.lock' with FileLock(_snake_case ): if os.path.exists(_snake_case ) and not args.overwrite_cache: lowerCAmelCase = time.time() lowerCAmelCase = torch.load(_snake_case ) logger.info( F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start ) else: logger.info(F'Creating features from dataset file at {args.data_dir}' ) if mode == Split.dev: lowerCAmelCase = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: lowerCAmelCase = self.processor.get_test_examples(args.data_dir ) else: lowerCAmelCase = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: lowerCAmelCase = examples[:limit_length] lowerCAmelCase = glue_convert_examples_to_features( _snake_case , _snake_case , max_length=args.max_seq_length , label_list=_snake_case , output_mode=self.output_mode , ) lowerCAmelCase = time.time() torch.save(self.features , _snake_case ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' ) def __len__( self ): """simple docstring""" return len(self.features ) def __getitem__( self , _snake_case ): """simple docstring""" return self.features[i] def UpperCamelCase__ ( self ): """simple docstring""" return self.label_list
309
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer __UpperCamelCase : Dict = logging.get_logger(__name__) __UpperCamelCase : str = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase : Optional[int] = { '''vocab_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''', '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-german-cased''': ( '''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json''' ), '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json''' ), }, } __UpperCamelCase : str = { '''distilbert-base-uncased''': 512, '''distilbert-base-uncased-distilled-squad''': 512, '''distilbert-base-cased''': 512, '''distilbert-base-cased-distilled-squad''': 512, '''distilbert-base-german-cased''': 512, '''distilbert-base-multilingual-cased''': 512, } __UpperCamelCase : Any = { '''distilbert-base-uncased''': {'''do_lower_case''': True}, '''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True}, '''distilbert-base-cased''': {'''do_lower_case''': False}, '''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False}, '''distilbert-base-german-cased''': {'''do_lower_case''': False}, '''distilbert-base-multilingual-cased''': {'''do_lower_case''': False}, } class a ( a__ ): snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_VOCAB_FILES_MAP snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = PRETRAINED_INIT_CONFIGURATION snake_case__ = ['''input_ids''', '''attention_mask'''] snake_case__ = DistilBertTokenizer def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ): """simple docstring""" super().__init__( _snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , 
tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , ) lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _snake_case ) != do_lower_case or normalizer_state.get('strip_accents' , _snake_case ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _snake_case ) != tokenize_chinese_chars ): lowerCAmelCase = getattr(_snake_case , normalizer_state.pop('type' ) ) lowerCAmelCase = do_lower_case lowerCAmelCase = strip_accents lowerCAmelCase = tokenize_chinese_chars lowerCAmelCase = normalizer_class(**_snake_case ) lowerCAmelCase = do_lower_case def UpperCamelCase__ ( self , _snake_case , _snake_case=None ): """simple docstring""" lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = self._tokenizer.model.save(_snake_case , name=_snake_case ) return tuple(_snake_case )
309
"""simple docstring""" import os from collections.abc import Iterator def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ): for dir_path, dir_names, filenames in os.walk(_UpperCAmelCase ): lowerCAmelCase = [d for d in dir_names if d != 'scripts' and d[0] not in '._'] for filename in filenames: if filename == "__init__.py": continue if os.path.splitext(_UpperCAmelCase )[1] in (".py", ".ipynb"): yield os.path.join(_UpperCAmelCase , _UpperCAmelCase ).lstrip('./' ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ): return F'{i * " "}*' if i else "\n##" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ): lowerCAmelCase = old_path.split(os.sep ) for i, new_part in enumerate(new_path.split(os.sep ) ): if (i + 1 > len(_UpperCAmelCase ) or old_parts[i] != new_part) and new_part: print(F'{md_prefix(_UpperCAmelCase )} {new_part.replace("_" , " " ).title()}' ) return new_path def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str = "." ): lowerCAmelCase = '' for filepath in sorted(good_file_paths(_UpperCAmelCase ) ): lowerCAmelCase ,lowerCAmelCase = os.path.split(_UpperCAmelCase ) if filepath != old_path: lowerCAmelCase = print_path(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase = (filepath.count(os.sep ) + 1) if filepath else 0 lowerCAmelCase = F'{filepath}/{filename}'.replace(' ' , '%20' ) lowerCAmelCase = os.path.splitext(filename.replace('_' , ' ' ).title() )[0] print(F'{md_prefix(_UpperCAmelCase )} [{filename}]({url})' ) if __name__ == "__main__": print_directory_md('''.''')
309
1
"""simple docstring""" from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean __UpperCamelCase : Dict = 0 __UpperCamelCase : Optional[Any] = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] __UpperCamelCase : Union[str, Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right __UpperCamelCase : Any = tuple[int, int] class a : def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" lowerCAmelCase = pos_x lowerCAmelCase = pos_y lowerCAmelCase = (pos_y, pos_x) lowerCAmelCase = goal_x lowerCAmelCase = goal_y lowerCAmelCase = g_cost lowerCAmelCase = parent lowerCAmelCase = self.calculate_heuristic() lowerCAmelCase = self.g_cost + self.h_cost def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.pos_x - self.goal_x lowerCAmelCase = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(_snake_case ) + abs(_snake_case ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self , _snake_case ): """simple docstring""" return self.f_cost < other.f_cost class a : def __init__( self , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _snake_case ) lowerCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , _snake_case ) lowerCAmelCase = [self.start] lowerCAmelCase = [] lowerCAmelCase = False def UpperCamelCase__ ( self ): """simple docstring""" while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() lowerCAmelCase = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(_snake_case ) self.closed_nodes.append(_snake_case ) lowerCAmelCase = self.get_successors(_snake_case ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(_snake_case ) else: # retrieve the best current path lowerCAmelCase = self.open_nodes.pop(self.open_nodes.index(_snake_case ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(_snake_case ) else: self.open_nodes.append(_snake_case ) return [self.start.pos] def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = [] for action in delta: lowerCAmelCase = parent.pos_x + action[1] lowerCAmelCase = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_snake_case ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( _snake_case , _snake_case , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _snake_case , ) ) return successors def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = node lowerCAmelCase = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) lowerCAmelCase = current_node.parent path.reverse() return path class a : def __init__( self , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = AStar(_snake_case , _snake_case ) lowerCAmelCase = AStar(_snake_case , _snake_case ) lowerCAmelCase = False def UpperCamelCase__ ( self ): """simple docstring""" while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() lowerCAmelCase = self.fwd_astar.open_nodes.pop(0 ) lowerCAmelCase = 
self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( _snake_case , _snake_case ) self.fwd_astar.closed_nodes.append(_snake_case ) self.bwd_astar.closed_nodes.append(_snake_case ) lowerCAmelCase = current_bwd_node lowerCAmelCase = current_fwd_node lowerCAmelCase = { self.fwd_astar: self.fwd_astar.get_successors(_snake_case ), self.bwd_astar: self.bwd_astar.get_successors(_snake_case ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(_snake_case ) else: # retrieve the best current path lowerCAmelCase = astar.open_nodes.pop( astar.open_nodes.index(_snake_case ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(_snake_case ) else: astar.open_nodes.append(_snake_case ) return [self.fwd_astar.start.pos] def UpperCamelCase__ ( self , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = self.fwd_astar.retrace_path(_snake_case ) lowerCAmelCase = self.bwd_astar.retrace_path(_snake_case ) bwd_path.pop() bwd_path.reverse() lowerCAmelCase = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] __UpperCamelCase : int = (0, 0) __UpperCamelCase : str = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) __UpperCamelCase : List[str] = time.time() __UpperCamelCase : Optional[int] = AStar(init, goal) __UpperCamelCase : int = a_star.search() __UpperCamelCase : Any = time.time() - start_time print(f'''AStar execution time = {end_time:f} seconds''') __UpperCamelCase : List[str] = time.time() __UpperCamelCase : Dict = BidirectionalAStar(init, goal) __UpperCamelCase : List[str] = time.time() - bd_start_time print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
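The HEURISTIC flag above switches between two distance estimates; a side-by-side sketch (both are admissible on a unit-cost 4-connected grid, since Euclidean <= Manhattan <= true path cost):

# The two heuristics selected by HEURISTIC above, as standalone functions.
from math import sqrt

def manhattan(dx: int, dy: int) -> float:
    return abs(dx) + abs(dy)

def euclidean(dx: int, dy: int) -> float:
    return sqrt(dx * dx + dy * dy)

print(manhattan(3, 4))  # 7
print(euclidean(3, 4))  # 5.0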
309
"""simple docstring""" import os from datetime import datetime as dt from github import Github __UpperCamelCase : int = [ '''good first issue''', '''good second issue''', '''good difficult issue''', '''enhancement''', '''new pipeline/model''', '''new scheduler''', '''wip''', ] def _SCREAMING_SNAKE_CASE (): lowerCAmelCase = Github(os.environ['GITHUB_TOKEN'] ) lowerCAmelCase = g.get_repo('huggingface/diffusers' ) lowerCAmelCase = repo.get_issues(state='open' ) for issue in open_issues: lowerCAmelCase = sorted(issue.get_comments() , key=lambda _UpperCAmelCase : i.created_at , reverse=_UpperCAmelCase ) lowerCAmelCase = comments[0] if len(_UpperCAmelCase ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state='closed' ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state='open' ) issue.remove_from_labels('stale' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( 'This issue has been automatically marked as stale because it has not had ' 'recent activity. If you think this still needs to be addressed ' 'please comment on this thread.\n\nPlease note that issues that do not follow the ' '[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) ' 'are likely to be ignored.' ) issue.add_to_labels('stale' ) if __name__ == "__main__": main()
309
1
"""simple docstring""" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list ): if any(not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or x < 0 for x in sequence ): raise TypeError('Sequence must be list of non-negative integers' ) for _ in range(len(_UpperCAmelCase ) ): for i, (rod_upper, rod_lower) in enumerate(zip(_UpperCAmelCase , sequence[1:] ) ): if rod_upper > rod_lower: sequence[i] -= rod_upper - rod_lower sequence[i + 1] += rod_upper - rod_lower return sequence if __name__ == "__main__": assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
309
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __UpperCamelCase : Any = { '''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''], '''processing_layoutlmv2''': ['''LayoutLMv2Processor'''], '''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = ['''LayoutLMv2TokenizerFast'''] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[int] = ['''LayoutLMv2FeatureExtractor'''] __UpperCamelCase : Optional[int] = ['''LayoutLMv2ImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Any = [ '''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LayoutLMv2ForQuestionAnswering''', '''LayoutLMv2ForSequenceClassification''', '''LayoutLMv2ForTokenClassification''', '''LayoutLMv2Layer''', '''LayoutLMv2Model''', '''LayoutLMv2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
309
1
"""simple docstring""" import os import unittest from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer from transformers.testing_utils import require_jieba, tooslow from ...test_tokenization_common import TokenizerTesterMixin @require_jieba class a ( a__ , unittest.TestCase ): snake_case__ = CpmAntTokenizer snake_case__ = False def UpperCamelCase__ ( self ): """simple docstring""" super().setUp() lowerCAmelCase = [ '<d>', '</d>', '<s>', '</s>', '</_>', '<unk>', '<pad>', '</n>', '我', '是', 'C', 'P', 'M', 'A', 'n', 't', ] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) @tooslow def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' ) lowerCAmelCase = '今天天气真好!' lowerCAmelCase = ['今天', '天气', '真', '好', '!'] lowerCAmelCase = tokenizer.tokenize(_snake_case ) self.assertListEqual(_snake_case , _snake_case ) lowerCAmelCase = '今天天气真好!' lowerCAmelCase = [tokenizer.bos_token] + tokens lowerCAmelCase = [6, 98_02, 1_49_62, 20_82, 8_31, 2_44] self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case ) lowerCAmelCase = tokenizer.decode(_snake_case ) self.assertEqual(_snake_case , _snake_case )
309
"""simple docstring""" import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) class a ( a__ ): def __init__( self , *_snake_case , **_snake_case ): """simple docstring""" warnings.warn( 'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use PoolFormerImageProcessor instead.' , _snake_case , ) super().__init__(*_snake_case , **_snake_case )
309
1
"""simple docstring""" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError('check_bouncy() accepts only integer arguments' ) lowerCAmelCase = str(_UpperCAmelCase ) lowerCAmelCase = ''.join(sorted(_UpperCAmelCase ) ) return sorted_str_n != str_n and sorted_str_n[::-1] != str_n def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : float = 99 ): if not 0 < percent < 100: raise ValueError('solution() only accepts values from 0 to 100' ) lowerCAmelCase = 0 lowerCAmelCase = 1 while True: if check_bouncy(_UpperCAmelCase ): bouncy_num += 1 if (bouncy_num / num) * 100 >= percent: return num num += 1 if __name__ == "__main__": from doctest import testmod testmod() print(f'''{solution(99)}''')
309
"""simple docstring""" from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. __UpperCamelCase : str = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. __UpperCamelCase : Optional[Any] = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. __UpperCamelCase : Dict = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ): lowerCAmelCase = len([g for position, g in enumerate(_UpperCAmelCase ) if g == main_target[position]] ) return (item, float(_UpperCAmelCase )) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str ): lowerCAmelCase = random.randint(0 , len(_UpperCAmelCase ) - 1 ) lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:] lowerCAmelCase = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] ): lowerCAmelCase = list(_UpperCAmelCase ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: lowerCAmelCase = random.choice(_UpperCAmelCase ) return "".join(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : tuple[str, float] , _UpperCAmelCase : list[tuple[str, float]] , _UpperCAmelCase : list[str] , ): lowerCAmelCase = [] # Generate more children proportionally to the fitness score. lowerCAmelCase = int(parent_a[1] * 100 ) + 1 lowerCAmelCase = 10 if child_n >= 10 else child_n for _ in range(_UpperCAmelCase ): lowerCAmelCase = population_score[random.randint(0 , _UpperCAmelCase )][0] lowerCAmelCase ,lowerCAmelCase = crossover(parent_a[0] , _UpperCAmelCase ) # Append new string to the population list. pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) ) pop.append(mutate(_UpperCAmelCase , _UpperCAmelCase ) ) return pop def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] , _UpperCAmelCase : bool = True ): # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: lowerCAmelCase = F'{N_POPULATION} must be bigger than {N_SELECTED}' raise ValueError(_UpperCAmelCase ) # Verify that the target contains no genes besides the ones inside genes variable. lowerCAmelCase = sorted({c for c in target if c not in genes} ) if not_in_genes_list: lowerCAmelCase = F'{not_in_genes_list} is not in genes list, evolution cannot converge' raise ValueError(_UpperCAmelCase ) # Generate random starting population. lowerCAmelCase = [] for _ in range(_UpperCAmelCase ): population.append(''.join([random.choice(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) )] ) ) # Just some logs to know what the algorithms is doing. lowerCAmelCase ,lowerCAmelCase = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(_UpperCAmelCase ) # Random population created. Now it's time to evaluate. 
# Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. lowerCAmelCase = [evaluate(_UpperCAmelCase , _UpperCAmelCase ) for item in population] # Check if there is a matching evolution. lowerCAmelCase = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[1] , reverse=_UpperCAmelCase ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F'\nGeneration: {generation}' F'\nTotal Population:{total_population}' F'\nBest score: {population_score[0][1]}' F'\nBest string: {population_score[0][0]}' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. lowerCAmelCase = population[: int(N_POPULATION / 3 )] population.clear() population.extend(_UpperCAmelCase ) # Normalize population score to be between 0 and 1. lowerCAmelCase = [ (item, score / len(_UpperCAmelCase )) for item, score in population_score ] # This is selection for i in range(_UpperCAmelCase ): population.extend(select(population_score[int(_UpperCAmelCase )] , _UpperCAmelCase , _UpperCAmelCase ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(_UpperCAmelCase ) > N_POPULATION: break if __name__ == "__main__": __UpperCamelCase : Tuple = ( '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!''' ) __UpperCamelCase : str = list( ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm''' '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\''' ) __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase : Dict = basic(target_str, genes_list) print( f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
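The crossover step above splices two parents at one random index; a deterministic sketch with an explicit cut point (the real function draws the cut with random.randint):

# Single-point crossover: swap the tails of two parent strings at `cut`.
def crossover(a: str, b: str, cut: int) -> tuple[str, str]:
    return a[:cut] + b[cut:], b[:cut] + a[cut:]

print(crossover("AAAA", "BBBB", 2))  # ('AABB', 'BBAA')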
309
1
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class a ( a__ , unittest.TestCase ): # TODO: is there an appropriate internal test set? snake_case__ = '''ssube/stable-diffusion-x4-upscaler-onnx''' def UpperCamelCase__ ( self , _snake_case=0 ): """simple docstring""" lowerCAmelCase = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(_snake_case ) ) lowerCAmelCase = torch.manual_seed(_snake_case ) lowerCAmelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**_snake_case ).images lowerCAmelCase = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) lowerCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**_snake_case ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase = np.array( [0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**_snake_case ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase = np.array( [0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) lowerCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**_snake_case 
).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) lowerCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**_snake_case ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase = np.array( [0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class a ( unittest.TestCase ): @property def UpperCamelCase__ ( self ): """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ort.SessionOptions() lowerCAmelCase = False return options def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) lowerCAmelCase = init_image.resize((1_28, 1_28) ) # using the PNDM scheduler by default lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = 'A fantasy landscape, trending on artstation' lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe( prompt=_snake_case , image=_snake_case , guidance_scale=7.5 , num_inference_steps=10 , generator=_snake_case , output_type='np' , ) lowerCAmelCase = output.images lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 5_12, 3) lowerCAmelCase = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) lowerCAmelCase = init_image.resize((1_28, 1_28) ) lowerCAmelCase = LMSDiscreteScheduler.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' ) lowerCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=_snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_snake_case ) lowerCAmelCase = 'A fantasy landscape, trending on artstation' lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe( prompt=_snake_case , image=_snake_case , guidance_scale=7.5 , num_inference_steps=20 , generator=_snake_case , output_type='np' , ) lowerCAmelCase = output.images lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, 
-1] assert images.shape == (1, 5_12, 5_12, 3) lowerCAmelCase = np.array( [0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
309
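A note on the pattern used throughout the pipeline tests above: instead of storing a full 512x512x3 golden image, they flatten a fixed 3x3 corner of one channel and compare it against nine hard-coded values. A minimal standalone sketch of that check follows; the helper name and default tolerance are illustrative, not part of the test suite.

import numpy as np


def assert_slice_close(image: np.ndarray, expected_slice: np.ndarray, atol: float = 1e-1) -> None:
    # take the bottom-right 3x3 corner of the last channel, as the tests above do
    image_slice = image[-3:, -3:, -1].flatten()
    max_diff = np.abs(image_slice - expected_slice.flatten()).max()
    assert max_diff < atol, f"max deviation {max_diff:.5f} exceeds tolerance {atol}"

The ONNX tests keep the tolerance loose (1e-1 on CPU, 2e-2 on GPU) because, as their TODO comments note, execution providers are not bit-for-bit reproducible across runs.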
"""simple docstring""" import copy import os import cva import numpy as np from matplotlib import pyplot as plt class a : def __init__( self ): """simple docstring""" lowerCAmelCase = '' lowerCAmelCase = '' lowerCAmelCase = [] lowerCAmelCase = 0 lowerCAmelCase = 2_56 lowerCAmelCase = 0 lowerCAmelCase = 0 lowerCAmelCase = 0 lowerCAmelCase = 0 def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase = cva.imread(_snake_case , 0 ) lowerCAmelCase = copy.deepcopy(self.img ) lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='x' ) lowerCAmelCase = np.sum(_snake_case ) for i in range(len(_snake_case ) ): lowerCAmelCase = x[i] / self.k self.sk += prk lowerCAmelCase = (self.L - 1) * self.sk if self.rem != 0: lowerCAmelCase = int(last % last ) lowerCAmelCase = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(_snake_case ) lowerCAmelCase = int(np.ma.count(self.img ) / self.img[1].size ) lowerCAmelCase = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): lowerCAmelCase = self.img[j][i] if num != self.last_list[num]: lowerCAmelCase = self.last_list[num] cva.imwrite('output_data/output.jpg' , self.img ) def UpperCamelCase__ ( self ): """simple docstring""" plt.hist(self.img.ravel() , 2_56 , [0, 2_56] ) def UpperCamelCase__ ( self ): """simple docstring""" cva.imshow('Output-Image' , self.img ) cva.imshow('Input-Image' , self.original_image ) cva.waitKey(50_00 ) cva.destroyAllWindows() if __name__ == "__main__": __UpperCamelCase : int = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''') __UpperCamelCase : List[Any] = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
309
1
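The `stretch` method above builds the classic histogram-equalization lookup table one gray level at a time: the cumulative probability `sk` is scaled by `L - 1` and rounded to get each new gray level. For reference, the same table can be computed in a few vectorized NumPy lines; this sketch assumes an 8-bit grayscale input and is not a drop-in replacement for the class.

import numpy as np


def equalization_lut(img: np.ndarray, levels: int = 256) -> np.ndarray:
    # new_level = round((L - 1) * CDF(old_level))
    hist, _ = np.histogram(img.ravel(), bins=levels, range=(0, levels))
    cdf = np.cumsum(hist) / img.size          # cumulative probability, the "sk" above
    return np.rint((levels - 1) * cdf).astype(np.uint8)


# applying the table replaces the nested row/column loop:
# equalized = equalization_lut(img)[img]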
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor __UpperCamelCase : Any = random.Random() def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any]=1.0 , _UpperCAmelCase : Dict=None , _UpperCAmelCase : List[Any]=None ): if rng is None: lowerCAmelCase = global_rng lowerCAmelCase = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class a ( unittest.TestCase ): def __init__( self , _snake_case , _snake_case=7 , _snake_case=4_00 , _snake_case=20_00 , _snake_case=24 , _snake_case=24 , _snake_case=0.0 , _snake_case=1_60_00 , _snake_case=True , _snake_case=True , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = min_seq_length lowerCAmelCase = max_seq_length lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) lowerCAmelCase = feature_size lowerCAmelCase = num_mel_bins lowerCAmelCase = padding_value lowerCAmelCase = sampling_rate lowerCAmelCase = return_attention_mask lowerCAmelCase = do_normalize def UpperCamelCase__ ( self ): """simple docstring""" return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def UpperCamelCase__ ( self , _snake_case=False , _snake_case=False ): """simple docstring""" def _flatten(_snake_case ): return list(itertools.chain(*_snake_case ) ) if equal_length: lowerCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size lowerCAmelCase = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: lowerCAmelCase = [np.asarray(_snake_case ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class a ( a__ , unittest.TestCase ): snake_case__ = SpeechaTextFeatureExtractor if is_speech_available() else None def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = SpeechaTextFeatureExtractionTester(self ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" self.assertTrue(np.all(np.mean(_snake_case , axis=0 ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(_snake_case , axis=0 ) - 1 ) < 1E-3 ) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] lowerCAmelCase = [np.asarray(_snake_case ) for speech_input in speech_inputs] # Test feature size lowerCAmelCase = feature_extractor(_snake_case , padding=_snake_case , return_tensors='np' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not batched input lowerCAmelCase = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features 
lowerCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) ) # Test batched lowerCAmelCase = feature_extractor(_snake_case , return_tensors='np' ).input_features lowerCAmelCase = feature_extractor(_snake_case , return_tensors='np' ).input_features for enc_seq_a, enc_seq_a in zip(_snake_case , _snake_case ): self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. lowerCAmelCase = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] lowerCAmelCase = np.asarray(_snake_case ) lowerCAmelCase = feature_extractor(_snake_case , return_tensors='np' ).input_features lowerCAmelCase = feature_extractor(_snake_case , return_tensors='np' ).input_features for enc_seq_a, enc_seq_a in zip(_snake_case , _snake_case ): self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] lowerCAmelCase = ['longest', 'max_length', 'do_not_pad'] lowerCAmelCase = [None, 16, None] for max_length, padding in zip(_snake_case , _snake_case ): lowerCAmelCase = feature_extractor( _snake_case , padding=_snake_case , max_length=_snake_case , return_attention_mask=_snake_case ) lowerCAmelCase = inputs.input_features lowerCAmelCase = inputs.attention_mask lowerCAmelCase = [np.sum(_snake_case ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] lowerCAmelCase = ['longest', 'max_length', 'do_not_pad'] lowerCAmelCase = [None, 16, None] for max_length, padding in zip(_snake_case , _snake_case ): lowerCAmelCase = feature_extractor( _snake_case , max_length=_snake_case , padding=_snake_case , return_tensors='np' , return_attention_mask=_snake_case ) lowerCAmelCase = inputs.input_features lowerCAmelCase = inputs.attention_mask lowerCAmelCase = [np.sum(_snake_case ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] lowerCAmelCase = feature_extractor( _snake_case , padding='max_length' , max_length=4 , truncation=_snake_case , return_tensors='np' , return_attention_mask=_snake_case , ) lowerCAmelCase = inputs.input_features lowerCAmelCase = inputs.attention_mask lowerCAmelCase = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) 
        self._check_zero_mean_unit_variance(input_features[1] )
        self._check_zero_mean_unit_variance(input_features[2] )

    def UpperCamelCase__ ( self ):
        """simple docstring"""
        lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
        lowerCAmelCase = feature_extractor(
            _snake_case , padding='longest' , max_length=4 , truncation=_snake_case , return_tensors='np' , return_attention_mask=_snake_case , )
        lowerCAmelCase = inputs.input_features
        lowerCAmelCase = inputs.attention_mask
        lowerCAmelCase = np.sum(attention_mask == 1 , axis=1 )
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
        self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape , (3, 4, 24) )

        lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
        lowerCAmelCase = feature_extractor(
            _snake_case , padding='longest' , max_length=16 , truncation=_snake_case , return_tensors='np' , return_attention_mask=_snake_case , )
        lowerCAmelCase = inputs.input_features
        lowerCAmelCase = inputs.attention_mask
        lowerCAmelCase = np.sum(attention_mask == 1 , axis=1 )
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
        self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length > longest -> then pad to longest
        self.assertEqual(input_features.shape , (3, 6, 24) )

    def UpperCamelCase__ ( self ):
        """simple docstring"""
        import torch

        lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowerCAmelCase = np.random.rand(1_00 , 32 ).astype(np.floataa )
        lowerCAmelCase = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            lowerCAmelCase = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_features.dtype == np.floataa )
            lowerCAmelCase = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_features.dtype == torch.floataa )

    def UpperCamelCase__ ( self , _snake_case ):
        """simple docstring"""
        from datasets import load_dataset

        lowerCAmelCase = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        lowerCAmelCase = ds.sort('id' ).select(range(_snake_case ) )[:num_samples]['audio']
        return [x["array"] for x in speech_samples]

    def UpperCamelCase__ ( self ):
        """simple docstring"""
        # fmt: off
        lowerCAmelCase = np.array([
            -1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
            -1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
            -1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
        ] )
        # fmt: on
        lowerCAmelCase = self._load_datasamples(1 )
        lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowerCAmelCase = feature_extractor(_snake_case , return_tensors='pt' ).input_features
        self.assertEqual(input_features.shape , (1, 5_84, 24) )
        self.assertTrue(np.allclose(input_features[0, 0, :30] , _snake_case , atol=1E-4 ) )
309
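`_check_zero_mean_unit_variance` in the tests above asserts per-feature mean close to 0 and variance close to 1, but only over the frames the attention mask marks as real. A sketch of the normalization those assertions describe; the function and variable names are illustrative, not the extractor's actual internals.

import numpy as np


def normalize_valid_frames(features: np.ndarray, attention_mask: np.ndarray) -> np.ndarray:
    # features:       (batch, max_frames, feature_size), zero-padded at the end
    # attention_mask: (batch, max_frames), 1 for real frames, 0 for padding
    normalized = np.zeros_like(features)
    lengths = attention_mask.sum(axis=-1).astype(int)
    for i, length in enumerate(lengths):
        valid = features[i, :length]
        # per-feature statistics over the unpadded frames only
        normalized[i, :length] = (valid - valid.mean(axis=0)) / (valid.std(axis=0) + 1e-7)
    return normalized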
"""simple docstring""" import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( 'split_dict' , [ SplitDict(), SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ), SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ), SplitDict({'train': SplitInfo()} ), ] , ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : SplitDict ): lowerCAmelCase = split_dict._to_yaml_list() assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ) lowerCAmelCase = SplitDict._from_yaml_list(_UpperCAmelCase ) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump lowerCAmelCase = None # the split name of split_dict takes over the name of the split info object lowerCAmelCase = split_name assert split_dict == reloaded @pytest.mark.parametrize( 'split_info' , [SplitInfo(), SplitInfo(dataset_name=_UpperCAmelCase ), SplitInfo(dataset_name='my_dataset' )] ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] ): # For backward compatibility, we need asdict(split_dict) to return split info dictrionaries with the "dataset_name" # field even if it's deprecated. This way old versionso of `datasets` can still reload dataset_infos.json files lowerCAmelCase = asdict(SplitDict({'train': split_info} ) ) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
309
1
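The split-dict test above is a round-trip property check: serialize with `_to_yaml_list`, reload with `_from_yaml_list`, normalize deprecated fields, and assert equality. The same shape applies to any serializer; a generic pytest sketch using JSON, purely illustrative and unrelated to the `datasets` internals.

import json

import pytest


@pytest.mark.parametrize("payload", [{}, {"train": {"num_examples": 42}}, {"a": [1, 2, 3]}])
def test_json_round_trip(payload):
    # dump -> load should be the identity for JSON-serializable payloads
    reloaded = json.loads(json.dumps(payload))
    assert reloaded == payload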
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __UpperCamelCase : Dict = { '''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Any = [ '''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ResNetForImageClassification''', '''ResNetModel''', '''ResNetPreTrainedModel''', '''ResNetBackbone''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = [ '''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFResNetForImageClassification''', '''TFResNetModel''', '''TFResNetPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ '''FlaxResNetForImageClassification''', '''FlaxResNetModel''', '''FlaxResNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
309
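`_LazyModule` above keeps package import cheap: `_import_structure` is registered up front, and the heavy torch/TF/flax submodules are only imported when one of their attributes is first touched. A minimal standalone sketch of the underlying mechanism, module-level `__getattr__` from PEP 562; the package layout and symbol names here are made up.

# lazy_pkg/__init__.py -- stand-in for the _LazyModule pattern
import importlib

_import_structure = {
    "configuration": ["Config"],
    "modeling": ["Model"],
}

# flat map: public name -> submodule that defines it
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}


def __getattr__(name):
    if name in _name_to_module:
        # import the defining submodule only on first attribute access
        module = importlib.import_module(f".{_name_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")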
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __UpperCamelCase : Any = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] ): from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ): from diffusers.utils.testing_utils import pytest_terminal_summary_main lowerCAmelCase = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(_UpperCAmelCase , id=_UpperCAmelCase )
309
1
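The conftest above only wires two pytest hooks to shared helpers in `diffusers.utils.testing_utils`. A self-contained sketch of what such hooks typically register and read back; the report-writing body is a placeholder, not the actual helper logic.

# conftest.py -- standalone sketch of the two delegated hooks
def pytest_addoption(parser):
    # register the flag that the terminal-summary hook reads back
    parser.addoption(
        "--make-reports",
        action="store",
        default=False,
        help="generate report files for each test suite run",
    )


def pytest_terminal_summary(terminalreporter):
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        # placeholder: write failure/duration/warning summaries under a per-id directory
        terminalreporter.write_sep("-", f"writing reports to reports/{make_reports}/")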