code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging lowercase__ :Optional[int] = logging.get_logger(__name__) lowercase__ :List[str] = { "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json", "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json", "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json", "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json", "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json", "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json", } class lowercase ( SCREAMING_SNAKE_CASE__ ): lowercase_ : Optional[Any] ='''bloom''' lowercase_ : List[str] =['''past_key_values'''] lowercase_ : Tuple ={ '''num_hidden_layers''': '''n_layer''', '''num_attention_heads''': '''n_head''', } def __init__( self ,A__=2_5_0_8_8_0 ,A__=6_4 ,A__=2 ,A__=8 ,A__=1E-5 ,A__=0.02 ,A__=True ,A__=1 ,A__=2 ,A__=False ,A__=0.0 ,A__=0.0 ,A__=1 ,A__=False ,**A__ ,): lowercase = vocab_size # Backward compatibility with n_embed kwarg lowercase = kwargs.pop('''n_embed''' ,A__) lowercase = hidden_size if n_embed is None else n_embed lowercase = n_layer lowercase = n_head lowercase = layer_norm_epsilon lowercase = initializer_range lowercase = use_cache lowercase = pretraining_tp lowercase = apply_residual_connection_post_layernorm lowercase = hidden_dropout lowercase = attention_dropout lowercase = bos_token_id lowercase = eos_token_id lowercase = slow_but_exact super().__init__(bos_token_id=A__ ,eos_token_id=A__ ,**A__) class lowercase ( SCREAMING_SNAKE_CASE__ ): 
lowercase_ : List[str] =version.parse('''1.12''' ) def __init__( self ,A__ ,A__ = "default" ,A__ = None ,A__ = False ,): super().__init__(A__ ,task=A__ ,patching_specs=A__ ,use_past=A__) if not getattr(self._config ,'''pad_token_id''' ,A__): # TODO: how to do that better? lowercase = 0 @property def A__ ( self): lowercase = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}}) if self.use_past: # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(A__ ,direction='''inputs''' ,inverted_values_shape=A__) lowercase = {0: '''batch''', 1: '''past_sequence + sequence'''} else: lowercase = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def A__ ( self): return self._config.n_layer @property def A__ ( self): return self._config.n_head @property def A__ ( self): return 1E-3 def A__ ( self ,A__ ,A__ = -1 ,A__ = -1 ,A__ = False ,A__ = None ,): lowercase = super(A__ ,self).generate_dummy_inputs( A__ ,batch_size=A__ ,seq_length=A__ ,is_pair=A__ ,framework=A__) # We need to order the input in the way they appears in the forward() lowercase = OrderedDict({'''input_ids''': common_inputs['''input_ids''']}) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''') else: import torch lowercase , lowercase = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowercase = seqlen + 2 lowercase = self._config.hidden_size // self.num_attention_heads lowercase = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) lowercase = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) lowercase = [ (torch.zeros(A__), torch.zeros(A__)) for _ in range(self.num_layers) ] lowercase = common_inputs['''attention_mask'''] if self.use_past: lowercase = ordered_inputs['''attention_mask'''].dtype 
lowercase = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(A__ ,A__ ,dtype=A__)] ,dim=1) return ordered_inputs @property def A__ ( self): return 1_3
101
"""simple docstring""" def snake_case_ ( A_ : int = 2_00_00_00 ): '''simple docstring''' _lowerCamelCase : int = [0 for i in range(n + 1 )] _lowerCamelCase : List[str] = 1 _lowerCamelCase : Any = 1 for i in range(2, int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i, n + 1, A_ ): _lowerCamelCase : str = 1 _lowerCamelCase : Tuple = 0 for i in range(A_ ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(F"""{solution() = }""")
72
0
"""simple docstring""" from diffusers.utils.testing_utils import require_onnxruntime @require_onnxruntime class _UpperCAmelCase : '''simple docstring''' pass
102
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def snake_case_ ( A_ : Any ): '''simple docstring''' _lowerCamelCase : Any = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(A_, A_ ) def snake_case_ ( A_ : Union[str, Any] ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : Tuple = emb.weight.shape _lowerCamelCase : Dict = nn.Linear(A_, A_, bias=A_ ) _lowerCamelCase : str = emb.weight.data return lin_layer def snake_case_ ( A_ : str, A_ : Optional[int]="facebook/mbart-large-en-ro", A_ : Union[str, Any]=False, A_ : List[str]=False ): '''simple docstring''' _lowerCamelCase : Tuple = torch.load(A_, map_location='''cpu''' )['''model'''] remove_ignore_keys_(A_ ) _lowerCamelCase : int = state_dict['''encoder.embed_tokens.weight'''].shape[0] _lowerCamelCase : Any = MBartConfig.from_pretrained(A_, vocab_size=A_ ) if mbart_aa and finetuned: _lowerCamelCase : Any = '''relu''' _lowerCamelCase : Optional[int] = state_dict['''decoder.embed_tokens.weight'''] _lowerCamelCase : Any = MBartForConditionalGeneration(A_ ) model.model.load_state_dict(A_ ) if finetuned: _lowerCamelCase : str = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model 
is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
72
0
from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class __snake_case ( yaml.SafeLoader ): def UpperCAmelCase__ ( self : Dict , A_ : Tuple): lowerCAmelCase_ : Tuple = [self.constructed_objects[key_node] for key_node, _ in node.value] lowerCAmelCase_ : List[str] = [tuple(A_) if isinstance(A_ , A_) else key for key in keys] lowerCAmelCase_ : Tuple = Counter(A_) lowerCAmelCase_ : Union[str, Any] = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""") def UpperCAmelCase__ ( self : int , A_ : Optional[Any] , A_ : int=False): lowerCAmelCase_ : int = super().construct_mapping(A_ , deep=A_) self._check_no_duplicates_on_constructed_node(A_) return mapping def UpperCamelCase( __UpperCamelCase : str ): lowerCAmelCase_ : int = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: lowerCAmelCase_ : int = full_content[1:].index('''---''' ) + 1 lowerCAmelCase_ : List[str] = '''\n'''.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(__UpperCamelCase ) class __snake_case ( UpperCamelCase_ ): # class attributes _a = {'''train_eval_index'''} # train-eval-index in the YAML metadata @classmethod def UpperCAmelCase__ ( cls : Optional[Any] , A_ : Path): with open(A_ , encoding='''utf-8''') as readme_file: lowerCAmelCase_ , lowerCAmelCase_ : Any = _split_yaml_from_readme(readme_file.read()) if yaml_string is not None: return cls.from_yaml_string(A_) else: return cls() def UpperCAmelCase__ ( self : Any , A_ : Path): if path.exists(): with open(A_ , encoding='''utf-8''') as readme_file: lowerCAmelCase_ : Tuple = readme_file.read() else: lowerCAmelCase_ : Optional[int] = None lowerCAmelCase_ : Any = self._to_readme(A_) with open(A_ , '''w''' , encoding='''utf-8''') as readme_file: readme_file.write(A_) def UpperCAmelCase__ ( self : Any , A_ : 
Optional[str] = None): if readme_content is not None: lowerCAmelCase_ , lowerCAmelCase_ : str = _split_yaml_from_readme(A_) lowerCAmelCase_ : Tuple = '''---\n''' + self.to_yaml_string() + '''---\n''' + content else: lowerCAmelCase_ : Tuple = '''---\n''' + self.to_yaml_string() + '''---\n''' return full_content @classmethod def UpperCAmelCase__ ( cls : str , A_ : str): lowerCAmelCase_ : str = yaml.load(A_ , Loader=_NoDuplicateSafeLoader) or {} # Convert the YAML keys to DatasetMetadata fields lowerCAmelCase_ : Union[str, Any] = { (key.replace('''-''' , '''_''') if key.replace('''-''' , '''_''') in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**A_) def UpperCAmelCase__ ( self : Optional[int]): return yaml.safe_dump( { (key.replace('''_''' , '''-''') if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=A_ , allow_unicode=A_ , encoding='''utf-8''' , ).decode('''utf-8''') A__ : str = { '''image-classification''': [], '''translation''': [], '''image-segmentation''': [], '''fill-mask''': [], '''automatic-speech-recognition''': [], '''token-classification''': [], '''sentence-similarity''': [], '''audio-classification''': [], '''question-answering''': [], '''summarization''': [], '''zero-shot-classification''': [], '''table-to-text''': [], '''feature-extraction''': [], '''other''': [], '''multiple-choice''': [], '''text-classification''': [], '''text-to-image''': [], '''text2text-generation''': [], '''zero-shot-image-classification''': [], '''tabular-classification''': [], '''tabular-regression''': [], '''image-to-image''': [], '''tabular-to-text''': [], '''unconditional-image-generation''': [], '''text-retrieval''': [], '''text-to-speech''': [], '''object-detection''': [], '''audio-to-audio''': [], '''text-generation''': [], '''conversational''': [], '''table-question-answering''': [], '''visual-question-answering''': [], '''image-to-text''': [], '''reinforcement-learning''': [], 
'''voice-activity-detection''': [], '''time-series-forecasting''': [], '''document-question-answering''': [], } if __name__ == "__main__": from argparse import ArgumentParser A__ : Dict = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''') ap.add_argument('''readme_filepath''') A__ : Optional[int] = ap.parse_args() A__ : Optional[Any] = Path(args.readme_filepath) A__ : Optional[Any] = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
103
"""simple docstring""" def snake_case_ ( A_ : list[list] ): '''simple docstring''' _lowerCamelCase : Optional[int] = current_set.copy() for row_index, row in enumerate(A_ ): _lowerCamelCase : Tuple = row[0] for column_index, column in enumerate(A_ ): if magnitude == 0: _lowerCamelCase : List[Any] = column continue _lowerCamelCase : List[Any] = column / magnitude # Subtract to cancel term _lowerCamelCase : Union[str, Any] = current_set[0] _lowerCamelCase : Dict = [first_row] _lowerCamelCase : str = current_set[1::] for row in current_set: _lowerCamelCase : Union[str, Any] = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(A_ ) continue for column_index in range(len(A_ ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(A_ ) # Create next recursion iteration set if len(final_set[0] ) != 3: _lowerCamelCase : Any = final_set[0] _lowerCamelCase : Any = [] _lowerCamelCase : Optional[int] = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) _lowerCamelCase : Dict = simplify(A_ ) for i in range(len(A_ ) ): resultant[i].insert(0, current_first_column[i] ) resultant.insert(0, A_ ) _lowerCamelCase : Tuple = resultant return final_set def snake_case_ ( A_ : list[list] ): '''simple docstring''' if len(A_ ) == 0: raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) _lowerCamelCase : Dict = len(A_ ) + 1 if any(len(A_ ) != _length for item in equations ): raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) for row in equations: if any(not isinstance(A_, (int, float) ) for column in row ): raise ValueError('''solve_simultaneous() requires lists of integers''' ) if len(A_ ) == 1: return [equations[0][-1] / equations[0][0]] _lowerCamelCase : Optional[Any] = equations.copy() if any(0 in row for row in data_set ): _lowerCamelCase : str = data_set.copy() _lowerCamelCase : List[Any] = [] for row_index, 
row in enumerate(A_ ): if 0 not in row: _lowerCamelCase : Union[str, Any] = data_set.pop(A_ ) break if not full_row: raise ValueError('''solve_simultaneous() requires at least 1 full equation''' ) data_set.insert(0, A_ ) _lowerCamelCase : List[str] = data_set.copy() _lowerCamelCase : int = simplify(A_ ) _lowerCamelCase : int = simplified[::-1] _lowerCamelCase : list = [] for row in simplified: _lowerCamelCase : Tuple = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue _lowerCamelCase : Optional[Any] = row.copy()[: len(A_ ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(A_ ) == 0: solutions.append(0 ) continue _lowerCamelCase : Tuple = temp_row[1::] _lowerCamelCase : Tuple = temp_row[::-1] for column_index, column in enumerate(A_ ): current_solution -= column * solutions[column_index] solutions.append(A_ ) _lowerCamelCase : Optional[int] = [] for item in solutions: final.append(float(round(A_, 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase__ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
72
0
'''simple docstring''' import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class lowercase_ : """simple docstring""" def __init__( self : Tuple ,lowercase__ : int ,lowercase__ : str=1_3 ,lowercase__ : Union[str, Any]=7 ,lowercase__ : List[str]=True ,lowercase__ : List[str]=True ,lowercase__ : List[str]=True ,lowercase__ : Any=True ,lowercase__ : str=9_9 ,lowercase__ : List[Any]=6_4 ,lowercase__ : Tuple=5 ,lowercase__ : List[Any]=4 ,lowercase__ : Any=3_7 ,lowercase__ : str="gelu" ,lowercase__ : Any=0.1 ,lowercase__ : Union[str, Any]=0.1 ,lowercase__ : List[Any]=5_1_2 ,lowercase__ : int=1_6 ,lowercase__ : Dict=2 ,lowercase__ : Dict=0.0_2 ,lowercase__ : str=3 ,lowercase__ : Optional[Any]=4 ,lowercase__ : int=None ,): __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_input_mask __lowercase = use_token_type_ids __lowercase = use_labels __lowercase = vocab_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = type_vocab_size __lowercase = type_sequence_label_size __lowercase = initializer_range __lowercase = num_labels __lowercase = num_choices __lowercase = scope __lowercase = 
vocab_size - 1 def SCREAMING_SNAKE_CASE ( self : Optional[int] ): __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) __lowercase = self.get_config() return config, input_ids, input_mask, token_labels def SCREAMING_SNAKE_CASE ( self : int ): return GPTNeoXConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowercase__ ,initializer_range=self.initializer_range ,pad_token_id=self.pad_token_id ,) def SCREAMING_SNAKE_CASE ( self : List[str] ): __lowercase , __lowercase , __lowercase , __lowercase = self.prepare_config_and_inputs() __lowercase = True return config, input_ids, input_mask, token_labels def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : Any ): __lowercase = GPTNeoXModel(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ) __lowercase = model(lowercase__ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,lowercase__ : int ): __lowercase = True __lowercase = GPTNeoXModel(lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 
self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : List[Any] ,lowercase__ : Dict ,lowercase__ : Optional[int] ,lowercase__ : Dict ): __lowercase = GPTNeoXForCausalLM(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,labels=lowercase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Optional[Any] ): __lowercase = self.num_labels __lowercase = GPTNeoXForQuestionAnswering(lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[Any] ,lowercase__ : Dict ,lowercase__ : List[str] ,lowercase__ : str ): __lowercase = self.num_labels __lowercase = GPTNeoXForSequenceClassification(lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,labels=lowercase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Any ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int] ): __lowercase = self.num_labels __lowercase = GPTNeoXForTokenClassification(lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,labels=lowercase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : str 
,lowercase__ : Optional[Any] ,lowercase__ : Optional[int] ): __lowercase = True __lowercase = GPTNeoXForCausalLM(config=lowercase__ ) model.to(lowercase__ ) model.eval() # first forward pass __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,use_cache=lowercase__ ) __lowercase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __lowercase = ids_tensor((self.batch_size, 3) ,config.vocab_size ) __lowercase = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and __lowercase = torch.cat([input_ids, next_tokens] ,dim=-1 ) __lowercase = torch.cat([input_mask, next_mask] ,dim=-1 ) __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,output_hidden_states=lowercase__ ) __lowercase = output_from_no_past['''hidden_states'''][0] __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,past_key_values=lowercase__ ,output_hidden_states=lowercase__ ,)['''hidden_states'''][0] # select random slice __lowercase = ids_tensor((1,) ,output_from_past.shape[-1] ).item() __lowercase = output_from_no_past[:, -3:, random_slice_idx].detach() __lowercase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowercase__ ,lowercase__ ,atol=1e-3 ) ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): __lowercase = self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () 
) SCREAMING_SNAKE_CASE : Union[str, Any] = (GPTNeoXForCausalLM,) if is_torch_available() else () SCREAMING_SNAKE_CASE : Union[str, Any] = ( { 'feature-extraction': GPTNeoXModel, 'question-answering': GPTNeoXForQuestionAnswering, 'text-classification': GPTNeoXForSequenceClassification, 'text-generation': GPTNeoXForCausalLM, 'token-classification': GPTNeoXForTokenClassification, 'zero-shot': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE : List[str] = False SCREAMING_SNAKE_CASE : Optional[int] = False SCREAMING_SNAKE_CASE : List[str] = False SCREAMING_SNAKE_CASE : Dict = False def SCREAMING_SNAKE_CASE ( self : Optional[int] ): __lowercase = GPTNeoXModelTester(self ) __lowercase = ConfigTester(self ,config_class=lowercase__ ,hidden_size=6_4 ,num_attention_heads=8 ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Any ): __lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(lowercase__ ,lowercase__ ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ): __lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(lowercase__ ,lowercase__ ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Tuple ): # This regression test was failing with PyTorch < 1.3 __lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_decoder() __lowercase = None self.model_tester.create_and_check_model_as_decoder(lowercase__ ,lowercase__ ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): __lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(lowercase__ ,lowercase__ ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Tuple ): 
__lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase__ ) @unittest.skip(reason='''Feed forward chunking is not implemented''' ) def SCREAMING_SNAKE_CASE ( self : List[str] ): pass @parameterized.expand([('''linear''',), ('''dynamic''',)] ) def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Union[str, Any] ): __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() __lowercase = ids_tensor([1, 1_0] ,config.vocab_size ) __lowercase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights __lowercase = GPTNeoXModel(lowercase__ ) original_model.to(lowercase__ ) original_model.eval() __lowercase = original_model(lowercase__ ).last_hidden_state __lowercase = original_model(lowercase__ ).last_hidden_state set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights __lowercase = {'''type''': scaling_type, '''factor''': 1_0.0} __lowercase = GPTNeoXModel(lowercase__ ) scaled_model.to(lowercase__ ) scaled_model.eval() __lowercase = scaled_model(lowercase__ ).last_hidden_state __lowercase = scaled_model(lowercase__ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should 
match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(lowercase__ ,lowercase__ ,atol=1e-5 ) ) else: self.assertFalse(torch.allclose(lowercase__ ,lowercase__ ,atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(lowercase__ ,lowercase__ ,atol=1e-5 ) ) @require_torch class lowercase_ (unittest.TestCase ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE ( self : List[Any] ): __lowercase = AutoTokenizer.from_pretrained('''EleutherAI/pythia-410m-deduped''' ) for checkpointing in [True, False]: __lowercase = GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''' ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(lowercase__ ) __lowercase = tokenizer('''My favorite food is''' ,return_tensors='''pt''' ).to(lowercase__ ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 __lowercase = '''My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure''' __lowercase = model.generate(**lowercase__ ,do_sample=lowercase__ ,max_new_tokens=2_0 ) __lowercase = tokenizer.batch_decode(lowercase__ )[0] self.assertEqual(lowercase__ ,lowercase__ )
104
"""simple docstring""" import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class __snake_case ( _lowercase): snake_case__ : List[Any] = "Speech2TextFeatureExtractor" snake_case__ : Union[str, Any] = "Speech2TextTokenizer" def __init__( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" super().__init__(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : List[str] = self.feature_extractor _lowerCamelCase : str = False def __call__( self : List[Any] , *__lowerCAmelCase : int , **__lowerCAmelCase : List[str] ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) _lowerCamelCase : str = kwargs.pop('''raw_speech''' ) else: _lowerCamelCase : Tuple = kwargs.pop('''audio''' , __lowerCAmelCase ) _lowerCamelCase : Optional[Any] = kwargs.pop('''sampling_rate''' , __lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = kwargs.pop('''text''' , __lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: _lowerCamelCase : List[Any] = args[0] _lowerCamelCase : int = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: _lowerCamelCase : List[Any] = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase ) if text is not None: _lowerCamelCase : List[Any] = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase ) if text is None: return inputs elif audio is None: return encodings else: _lowerCamelCase : List[str] = encodings['''input_ids'''] return inputs def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Tuple ): """simple docstring""" return 
self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : int ): """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @contextmanager def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) _lowerCamelCase : Union[str, Any] = True _lowerCamelCase : Any = self.tokenizer yield _lowerCamelCase : List[str] = self.feature_extractor _lowerCamelCase : Tuple = False
72
0
"""simple docstring""" import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __UpperCamelCase ( a__ , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): @property def __a ( self ) -> Optional[int]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __a ( self ) -> Any: a : Union[str, Any] = ort.SessionOptions() a : Any = False return options def __a ( self ) -> Dict: a : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) a : List[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) a : int = OnnxStableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) a : Optional[int] = "A red cat sitting on a park bench" a : Dict = np.random.RandomState(0 ) a : str = pipe( prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCAmelCase__ , output_type="np" , ) a : Union[str, Any] = output.images a : str = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) a : int = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 
0.2_464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __a ( self ) -> Tuple: a : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) a : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) a : Dict = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" ) a : Union[str, Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) a : List[Any] = "A red cat sitting on a park bench" a : List[Any] = np.random.RandomState(0 ) a : List[Any] = pipe( prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCAmelCase__ , output_type="np" , ) a : Any = output.images a : List[Any] = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) a : Any = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
105
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''ReformerTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''ReformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ReformerAttention''', '''ReformerForMaskedLM''', '''ReformerForQuestionAnswering''', '''ReformerForSequenceClassification''', '''ReformerLayer''', '''ReformerModel''', '''ReformerModelWithLMHead''', '''ReformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = 
_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
0
"""simple docstring""" class SCREAMING_SNAKE_CASE : # Public class to implement a graph """simple docstring""" def __init__( self : Dict ,lowercase_ : int ,lowercase_ : int ,lowercase_ : list[list[bool]] ): lowerCAmelCase__ : Union[str, Any] = row lowerCAmelCase__ : Tuple = col lowerCAmelCase__ : Optional[Any] = graph def __lowerCAmelCase ( self : Dict ,lowercase_ : int ,lowercase_ : int ,lowercase_ : list[list[bool]] ): return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def __lowerCAmelCase ( self : Dict ,lowercase_ : int ,lowercase_ : int ,lowercase_ : list[list[bool]] ): # Checking all 8 elements surrounding nth element lowerCAmelCase__ : List[Any] = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order lowerCAmelCase__ : List[Any] = [-1, 0, 1, -1, 1, -1, 0, 1] lowerCAmelCase__ : Optional[Any] = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] ,j + col_nbr[k] ,lowercase_ ): self.diffs(i + row_nbr[k] ,j + col_nbr[k] ,lowercase_ ) def __lowerCAmelCase ( self : Union[str, Any] ): # And finally, count all islands. lowerCAmelCase__ : Tuple = [[False for j in range(self.COL )] for i in range(self.ROW )] lowerCAmelCase__ : List[Any] = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(lowercase_ ,lowercase_ ,lowercase_ ) count += 1 return count
106
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = { '''configuration_autoformer''': [ '''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AutoformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''AutoformerForPrediction''', '''AutoformerModel''', '''AutoformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
0
class snake_case__ : """simple docstring""" def __init__( self : Union[str, Any] ) -> List[Any]: a = {} def __UpperCAmelCase ( self : Optional[Any] ) -> None: print(self.vertex ) for i in self.vertex: print(__lowerCamelCase , " -> " , " -> ".join([str(__lowerCamelCase ) for j in self.vertex[i]] ) ) def __UpperCAmelCase ( self : int , __lowerCamelCase : int , __lowerCamelCase : int ) -> None: # check if vertex is already present, if from_vertex in self.vertex: self.vertex[from_vertex].append(__lowerCamelCase ) else: # else make a new vertex a = [to_vertex] def __UpperCAmelCase ( self : int ) -> None: # visited array for storing already visited nodes a = [False] * len(self.vertex ) # call the recursive helper function for i in range(len(self.vertex ) ): if not visited[i]: self.dfs_recursive(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : int , __lowerCamelCase : int , __lowerCamelCase : list ) -> None: # mark start vertex as visited a = True print(__lowerCamelCase , end=" " ) # Recur for all the vertices that are adjacent to this node for i in self.vertex: if not visited[i]: self.dfs_recursive(__lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": __lowerCAmelCase : str = Graph() g.add_edge(0, 1) g.add_edge(0, 2) g.add_edge(1, 2) g.add_edge(2, 0) g.add_edge(2, 3) g.add_edge(3, 3) g.print_graph() print('DFS:') g.dfs() # OUTPUT: # 0 -> 1 -> 2 # 1 -> 2 # 2 -> 0 -> 3 # 3 -> 3 # DFS: # 0 1 2 3
107
"""simple docstring""" import math def snake_case_ ( A_ : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5, int(math.sqrt(A_ ) + 1 ), 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def snake_case_ ( A_ : float = 0.1 ): '''simple docstring''' _lowerCamelCase : Optional[int] = 3 _lowerCamelCase : List[str] = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1 ): primes += is_prime(A_ ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
72
0
"""simple docstring""" from __future__ import annotations from collections import namedtuple def a__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ): '''simple docstring''' lowerCAmelCase : Optional[int] = namedtuple("result" , "name value" ) if (voltage, current, power).count(0 ) != 1: raise ValueError("Only one argument must be 0" ) elif power < 0: raise ValueError( "Power cannot be negative in any electrical/electronics system" ) elif voltage == 0: return result("voltage" , power / current ) elif current == 0: return result("current" , power / voltage ) elif power == 0: return result("power" , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
108
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _lowerCamelCase : Union[str, Any] = TextStreamer(__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCamelCase : int = cs.out[:-1] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase , 
max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : List[str] = tokenizer.decode(greedy_ids[0] ) _lowerCamelCase : Tuple = TextIteratorStreamer(__lowerCAmelCase ) _lowerCamelCase : Tuple = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase ) thread.start() _lowerCamelCase : int = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : int = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = greedy_ids[:, input_ids.shape[1] :] _lowerCamelCase : int = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _lowerCamelCase : Any = TextStreamer(__lowerCAmelCase , skip_prompt=__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCamelCase : Union[str, Any] = cs.out[:-1] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''distilgpt2''' ) _lowerCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : str = -1 _lowerCamelCase : Any = torch.ones((1, 5) , device=__lowerCAmelCase 
).long() * model.config.bos_token_id with CaptureStdout() as cs: _lowerCamelCase : List[Any] = TextStreamer(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _lowerCamelCase : Any = cs.out[:-1] # Remove the final "\n" _lowerCamelCase : int = tokenizer(__lowerCAmelCase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = -1 _lowerCamelCase : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : List[str] = TextIteratorStreamer(__lowerCAmelCase , timeout=0.0_01 ) _lowerCamelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__lowerCAmelCase ): _lowerCamelCase : Optional[Any] = '''''' for new_text in streamer: streamer_text += new_text
72
0
"""simple docstring""" import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() A: Optional[int] = logging.get_logger(__name__) A: Tuple = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear", "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed", "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "ctc_proj", "mask_emb": "masked_spec_embed", } A: List[str] = [ "ctc_proj", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def _snake_case ( UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : Any ): for attribute in key.split(""".""" ): UpperCAmelCase : Optional[Any] 
= getattr(UpperCamelCase , UpperCamelCase ) if weight_type is not None: UpperCAmelCase : List[Any] = getattr(UpperCamelCase , UpperCamelCase ).shape else: UpperCAmelCase : str = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": UpperCAmelCase : Optional[Any] = value elif weight_type == "weight_g": UpperCAmelCase : str = value elif weight_type == "weight_v": UpperCAmelCase : Union[str, Any] = value elif weight_type == "bias": UpperCAmelCase : str = value else: UpperCAmelCase : Union[str, Any] = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def _snake_case ( UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] ): UpperCAmelCase : Tuple = [] UpperCAmelCase : Any = fairseq_model.state_dict() UpperCAmelCase : Tuple = hf_model.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase : str = False if "conv_layers" in name: load_conv_layer( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == """group""" , ) UpperCAmelCase : Union[str, Any] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: UpperCAmelCase : Dict = True if "*" in mapped_key: UpperCAmelCase : str = name.split(UpperCamelCase )[0].split(""".""" )[-2] UpperCAmelCase : Tuple = mapped_key.replace("""*""" , UpperCamelCase ) if "weight_g" in name: UpperCAmelCase : Any = """weight_g""" elif "weight_v" in name: UpperCAmelCase : Optional[Any] = """weight_v""" elif "bias" in name and "relative_attention_bias" not in name: UpperCAmelCase : Union[str, Any] = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase : str = """weight""" else: UpperCAmelCase : Optional[Any] = None 
set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) continue if not is_used: unused_weights.append(UpperCamelCase ) logger.warning(F"Unused weights: {unused_weights}" ) def _snake_case ( UpperCamelCase : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : Any ): UpperCAmelCase : str = full_name.split("""conv_layers.""" )[-1] UpperCAmelCase : Dict = name.split(""".""" ) UpperCAmelCase : List[str] = int(items[0] ) UpperCAmelCase : Any = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) UpperCAmelCase : Optional[Any] = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) UpperCAmelCase : Tuple = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) UpperCAmelCase : str = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." 
) UpperCAmelCase : Optional[Any] = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(UpperCamelCase ) @torch.no_grad() def _snake_case ( UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : List[Any]=None ): # load the pre-trained checkpoints UpperCAmelCase : List[Any] = torch.load(UpperCamelCase ) UpperCAmelCase : List[str] = WavLMConfigOrig(checkpoint["""cfg"""] ) UpperCAmelCase : Optional[int] = WavLMOrig(UpperCamelCase ) model.load_state_dict(checkpoint["""model"""] ) model.eval() if config_path is not None: UpperCAmelCase : List[str] = WavLMConfig.from_pretrained(UpperCamelCase ) else: UpperCAmelCase : List[Any] = WavLMConfig() UpperCAmelCase : Any = WavLMModel(UpperCamelCase ) recursively_load_weights(UpperCamelCase , UpperCamelCase ) hf_wavlm.save_pretrained(UpperCamelCase ) if __name__ == "__main__": A: int = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") A: Tuple = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
109
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase__ = logging.get_logger(__name__) # TODO: upload to AWS lowerCAmelCase__ = { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json''' ), } class __snake_case ( _lowercase): snake_case__ : int = "retribert" def __init__( self : Optional[int] , __lowerCAmelCase : str=3_0_5_2_2 , __lowerCAmelCase : Tuple=7_6_8 , __lowerCAmelCase : Union[str, Any]=8 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=3_0_7_2 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=5_1_2 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Optional[Any]=1E-12 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any=1_2_8 , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : str , ): """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) _lowerCamelCase : Dict = vocab_size _lowerCamelCase : Union[str, Any] = hidden_size _lowerCamelCase : Dict = num_hidden_layers _lowerCamelCase : int = num_attention_heads _lowerCamelCase : int = hidden_act _lowerCamelCase : str = intermediate_size _lowerCamelCase : Union[str, Any] = hidden_dropout_prob _lowerCamelCase : List[Any] = attention_probs_dropout_prob _lowerCamelCase : Optional[int] = max_position_embeddings _lowerCamelCase : List[Any] = type_vocab_size _lowerCamelCase : Any = initializer_range _lowerCamelCase : Optional[int] = layer_norm_eps _lowerCamelCase : int = share_encoders _lowerCamelCase : Optional[Any] = projection_dim
72
0
import argparse import collections import json import os import re import string import sys import numpy as np lowerCAmelCase = re.compile(R'\b(a|an|the)\b', re.UNICODE) lowerCAmelCase = None def _a ( ): """simple docstring""" lowercase__ = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' ) parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' ) parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' ) parser.add_argument( '''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' ) parser.add_argument( '''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' ) parser.add_argument( '''--na-prob-thresh''' , '''-t''' , type=SCREAMING_SNAKE_CASE , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , ) parser.add_argument( '''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=SCREAMING_SNAKE_CASE , help='''Save precision-recall curves to directory.''' ) parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: lowercase__ = bool(qa['''answers''']['''text'''] ) return qid_to_has_ans def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" def remove_articles(SCREAMING_SNAKE_CASE ): return ARTICLES_REGEX.sub(''' ''' , SCREAMING_SNAKE_CASE ) def white_space_fix(SCREAMING_SNAKE_CASE ): return " ".join(text.split() ) def remove_punc(SCREAMING_SNAKE_CASE ): lowercase__ = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(SCREAMING_SNAKE_CASE ): return text.lower() return 
white_space_fix(remove_articles(remove_punc(lower(SCREAMING_SNAKE_CASE ) ) ) ) def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" if not s: return [] return normalize_answer(SCREAMING_SNAKE_CASE ).split() def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" return int(normalize_answer(SCREAMING_SNAKE_CASE ) == normalize_answer(SCREAMING_SNAKE_CASE ) ) def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = get_tokens(SCREAMING_SNAKE_CASE ) lowercase__ = get_tokens(SCREAMING_SNAKE_CASE ) lowercase__ = collections.Counter(SCREAMING_SNAKE_CASE ) & collections.Counter(SCREAMING_SNAKE_CASE ) lowercase__ = sum(common.values() ) if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 lowercase__ = 1.0 * num_same / len(SCREAMING_SNAKE_CASE ) lowercase__ = 1.0 * num_same / len(SCREAMING_SNAKE_CASE ) lowercase__ = (2 * precision * recall) / (precision + recall) return fa def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = {} lowercase__ = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: lowercase__ = qa['''id'''] lowercase__ = [t for t in qa['''answers''']['''text'''] if normalize_answer(SCREAMING_SNAKE_CASE )] if not gold_answers: # For unanswerable questions, only correct answer is empty string lowercase__ = [''''''] if qid not in preds: print(f'Missing prediction for {qid}' ) continue lowercase__ = preds[qid] # Take max over all gold answers lowercase__ = max(compute_exact(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for a in gold_answers ) lowercase__ = max(compute_fa(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for a in gold_answers ) return exact_scores, fa_scores def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" 
lowercase__ = {} for qid, s in scores.items(): lowercase__ = na_probs[qid] > na_prob_thresh if pred_na: lowercase__ = float(not qid_to_has_ans[qid] ) else: lowercase__ = s return new_scores def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ): """simple docstring""" if not qid_list: lowercase__ = len(SCREAMING_SNAKE_CASE ) return collections.OrderedDict( [ ('''exact''', 100.0 * sum(exact_scores.values() ) / total), ('''f1''', 100.0 * sum(fa_scores.values() ) / total), ('''total''', total), ] ) else: lowercase__ = len(SCREAMING_SNAKE_CASE ) return collections.OrderedDict( [ ('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list ) / total), ('''f1''', 100.0 * sum(fa_scores[k] for k in qid_list ) / total), ('''total''', total), ] ) def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" for k in new_eval: lowercase__ = new_eval[k] def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" plt.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , color='''b''' , alpha=0.2 , where='''post''' ) plt.fill_between(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , step='''post''' , alpha=0.2 , color='''b''' ) plt.xlabel('''Recall''' ) plt.ylabel('''Precision''' ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(SCREAMING_SNAKE_CASE ) plt.savefig(SCREAMING_SNAKE_CASE ) plt.clf() def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ): """simple docstring""" lowercase__ = sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : na_probs[k] ) lowercase__ = 0.0 lowercase__ = 1.0 lowercase__ = 0.0 lowercase__ = [1.0] lowercase__ = [0.0] lowercase__ = 0.0 for i, qid in enumerate(SCREAMING_SNAKE_CASE ): if qid_to_has_ans[qid]: true_pos += scores[qid] lowercase__ = true_pos / float(i + 1 ) lowercase__ = true_pos / 
float(SCREAMING_SNAKE_CASE ) if i == len(SCREAMING_SNAKE_CASE ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(SCREAMING_SNAKE_CASE ) recalls.append(SCREAMING_SNAKE_CASE ) if out_image: plot_pr_curve(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return {"ap": 100.0 * avg_prec} def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if out_image_dir and not os.path.exists(SCREAMING_SNAKE_CASE ): os.makedirs(SCREAMING_SNAKE_CASE ) lowercase__ = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return lowercase__ = make_precision_recall_eval( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , out_image=os.path.join(SCREAMING_SNAKE_CASE , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , ) lowercase__ = make_precision_recall_eval( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , out_image=os.path.join(SCREAMING_SNAKE_CASE , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , ) lowercase__ = {k: float(SCREAMING_SNAKE_CASE ) for k, v in qid_to_has_ans.items()} lowercase__ = make_precision_recall_eval( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , out_image=os.path.join(SCREAMING_SNAKE_CASE , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)''' , ) merge_eval(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''pr_exact''' ) merge_eval(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''pr_f1''' ) merge_eval(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''pr_oracle''' ) def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if not qid_list: return lowercase__ = [na_probs[k] for k in qid_list] lowercase__ = np.ones_like(SCREAMING_SNAKE_CASE ) / float(len(SCREAMING_SNAKE_CASE ) ) plt.hist(SCREAMING_SNAKE_CASE , weights=SCREAMING_SNAKE_CASE , bins=20 , range=(0.0, 1.0) ) plt.xlabel('''Model probability of no-answer''' ) plt.ylabel('''Proportion of dataset''' ) plt.title(f'Histogram of no-answer probability: {name}' ) plt.savefig(os.path.join(SCREAMING_SNAKE_CASE , f'na_prob_hist_{name}.png' ) ) plt.clf() def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) lowercase__ = num_no_ans lowercase__ = cur_score lowercase__ = 0.0 lowercase__ = sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : na_probs[k] ) for i, qid in enumerate(SCREAMING_SNAKE_CASE ): if qid not in scores: continue if qid_to_has_ans[qid]: lowercase__ = scores[qid] else: if preds[qid]: lowercase__ = -1 else: lowercase__ = 0 cur_score += diff if cur_score > best_score: lowercase__ = cur_score lowercase__ = na_probs[qid] return 100.0 * best_score / len(SCREAMING_SNAKE_CASE ), best_thresh def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ , lowercase__ = find_best_thresh(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ , lowercase__ = find_best_thresh(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) 
lowercase__ = best_exact lowercase__ = exact_thresh lowercase__ = best_fa lowercase__ = fa_thresh def _a ( ): """simple docstring""" with open(OPTS.data_file ) as f: lowercase__ = json.load(SCREAMING_SNAKE_CASE ) lowercase__ = dataset_json['''data'''] with open(OPTS.pred_file ) as f: lowercase__ = json.load(SCREAMING_SNAKE_CASE ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: lowercase__ = json.load(SCREAMING_SNAKE_CASE ) else: lowercase__ = {k: 0.0 for k in preds} lowercase__ = make_qid_to_has_ans(SCREAMING_SNAKE_CASE ) # maps qid to True/False lowercase__ = [k for k, v in qid_to_has_ans.items() if v] lowercase__ = [k for k, v in qid_to_has_ans.items() if not v] lowercase__ , lowercase__ = get_raw_scores(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowercase__ = apply_no_ans_threshold(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , OPTS.na_prob_thresh ) lowercase__ = apply_no_ans_threshold(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , OPTS.na_prob_thresh ) lowercase__ = make_eval_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if has_ans_qids: lowercase__ = make_eval_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , qid_list=SCREAMING_SNAKE_CASE ) merge_eval(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''HasAns''' ) if no_ans_qids: lowercase__ = make_eval_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , qid_list=SCREAMING_SNAKE_CASE ) merge_eval(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''NoAns''' ) if OPTS.na_prob_file: find_all_best_thresh(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , OPTS.out_image_dir ) histogram_na_prob(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , OPTS.out_image_dir , '''hasAns''' ) 
histogram_na_prob(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , OPTS.out_image_dir , '''noAns''' ) if OPTS.out_file: with open(OPTS.out_file , '''w''' ) as f: json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: print(json.dumps(SCREAMING_SNAKE_CASE , indent=2 ) ) if __name__ == "__main__": lowerCAmelCase = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt main()
110
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 
'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : Optional[int] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : str = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] _lowerCamelCase : Optional[Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : Tuple = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] _lowerCamelCase : Any = '''fp16''' 
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] _lowerCamelCase : str = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] _lowerCamelCase : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : int = '''fp16''' self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
72
0
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} # See all BART models at https://huggingface.co/models?filter=bart _lowerCamelCase : List[str] = { """vocab_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""", }, """merges_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""", }, """tokenizer_file""": { """facebook/bart-base""": 
"""https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""", }, } _lowerCamelCase : Dict = { """facebook/bart-base""": 1024, """facebook/bart-large""": 1024, """facebook/bart-large-mnli""": 1024, """facebook/bart-large-cnn""": 1024, """facebook/bart-large-xsum""": 1024, """yjernite/bart_eli5""": 1024, } class UpperCamelCase_ ( _lowercase ): '''simple docstring''' UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = ["input_ids", "attention_mask"] UpperCAmelCase__ = BartTokenizer def __init__( self : int , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : List[Any]="replace" , UpperCAmelCase__ : Any="<s>" , UpperCAmelCase__ : Optional[int]="</s>" , UpperCAmelCase__ : str="</s>" , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : Union[str, Any]="<unk>" , UpperCAmelCase__ : Any="<pad>" , UpperCAmelCase__ : Optional[Any]="<mask>" , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Dict=True , **UpperCAmelCase__ : Optional[Any] , ) ->Optional[int]: '''simple docstring''' super().__init__( __lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , errors=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , 
pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase , **__lowerCAmelCase , ) A__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('''add_prefix_space''' , __lowerCAmelCase) != add_prefix_space: A__ = getattr(__lowerCAmelCase , pre_tok_state.pop('''type''')) A__ = add_prefix_space A__ = pre_tok_class(**__lowerCAmelCase) A__ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` A__ = '''post_processor''' A__ = getattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase) if tokenizer_component_instance: A__ = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A__ = tuple(state['''sep''']) if "cls" in state: A__ = tuple(state['''cls''']) A__ = False if state.get('''add_prefix_space''' , __lowerCAmelCase) != add_prefix_space: A__ = add_prefix_space A__ = True if state.get('''trim_offsets''' , __lowerCAmelCase) != trim_offsets: A__ = trim_offsets A__ = True if changes_to_apply: A__ = getattr(__lowerCAmelCase , state.pop('''type''')) A__ = component_class(**__lowerCAmelCase) setattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase) @property def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''') return None return str(self._mask_token) @mask_token.setter def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : int) ->int: '''simple docstring''' A__ = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase) if isinstance(__lowerCAmelCase , __lowerCAmelCase) else value A__ = value def SCREAMING_SNAKE_CASE ( self : int , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : List[Any]) ->str: '''simple docstring''' A__ = 
kwargs.get('''is_split_into_words''' , __lowerCAmelCase) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''') return super()._batch_encode_plus(*__lowerCAmelCase , **__lowerCAmelCase) def SCREAMING_SNAKE_CASE ( self : Any , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : List[Any]) ->Any: '''simple docstring''' A__ = kwargs.get('''is_split_into_words''' , __lowerCAmelCase) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''') return super()._encode_plus(*__lowerCAmelCase , **__lowerCAmelCase) def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None) ->Any: '''simple docstring''' A__ = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase) return tuple(__lowerCAmelCase) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str]=None) ->Union[str, Any]: '''simple docstring''' A__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None) ->int: '''simple docstring''' A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
14
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class __snake_case : def __init__( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]=1_0 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=3_2 * 4 , __lowerCAmelCase : Dict=3_2 * 6 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[str]=3_2 , ): """simple docstring""" _lowerCamelCase : List[str] = parent _lowerCamelCase : str = batch_size _lowerCamelCase : Dict = is_training _lowerCamelCase : str = use_auxiliary_loss _lowerCamelCase : Any = num_queries _lowerCamelCase : List[Any] = num_channels _lowerCamelCase : int = min_size _lowerCamelCase : Any = max_size _lowerCamelCase : int = num_labels _lowerCamelCase : List[str] = mask_feature_size def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __lowerCAmelCase ) _lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, 
self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5 ).float() _lowerCamelCase : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long() _lowerCamelCase : Dict = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = self.prepare_config_and_inputs() _lowerCamelCase : List[str] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ): """simple docstring""" _lowerCamelCase : List[str] = output.encoder_hidden_states _lowerCamelCase : Tuple = output.pixel_decoder_hidden_states _lowerCamelCase : Dict = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers ) def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=False ): """simple docstring""" with torch.no_grad(): _lowerCamelCase : Optional[int] = MaskFormerModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() 
_lowerCamelCase : Tuple = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) _lowerCamelCase : List[str] = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" _lowerCamelCase : str = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() def comm_check_on_output(__lowerCAmelCase : Dict ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _lowerCamelCase : str = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ) 
comm_check_on_output(__lowerCAmelCase ) _lowerCamelCase : List[str] = model( pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __snake_case ( _lowercase , _lowercase , unittest.TestCase): snake_case__ : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () snake_case__ : Any = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) snake_case__ : List[str] = False snake_case__ : List[str] = False snake_case__ : Optional[int] = False snake_case__ : Dict = False def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Optional[int] = MaskFormerModelTester(self ) _lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple 
docstring""" pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : str = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Dict = [*signature.parameters.keys()] _lowerCamelCase : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" for model_name in ["facebook/maskformer-swin-small-coco"]: _lowerCamelCase : Union[str, Any] = MaskFormerModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = (self.model_tester.min_size,) * 2 _lowerCamelCase : Union[str, Any] = { '''pixel_values''': torch.randn((2, 3, *size) , device=__lowerCAmelCase ), '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowerCAmelCase ), '''class_labels''': torch.zeros(2 , 1_0 , device=__lowerCAmelCase ).long(), } _lowerCamelCase : int = MaskFormerForInstanceSegmentation(MaskFormerConfig() 
).to(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ).to(__lowerCAmelCase ) _lowerCamelCase : List[str] = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase ) self.assertTrue(outputs.attentions is not None ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _lowerCamelCase : Union[str, Any] = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : Any = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() _lowerCamelCase : List[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : int = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[int] = True _lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , 
mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) _lowerCamelCase : List[Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCamelCase : Tuple = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _lowerCamelCase : List[str] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCamelCase : Optional[int] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__lowerCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) lowerCAmelCase__ = 1E-4 def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class __snake_case ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : str = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowerCAmelCase ) _lowerCamelCase : Any = self.default_image_processor _lowerCamelCase : List[Any] = prepare_img() _lowerCamelCase : Any = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : Any = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : int = model(**__lowerCAmelCase ) 
_lowerCamelCase : str = torch.tensor( [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) _lowerCamelCase : Union[str, Any] = torch.tensor( [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) _lowerCamelCase : Optional[int] = torch.tensor( [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[int] = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : str = prepare_img() _lowerCamelCase : int = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : str = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : Optional[int] = model(**__lowerCAmelCase ) # masks_queries_logits _lowerCamelCase : Optional[int] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCamelCase : List[str] = [ [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33], [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95], 
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42], ] _lowerCamelCase : Any = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits _lowerCamelCase : List[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCamelCase : str = torch.tensor( [ [1.6512E00, -5.2572E00, -3.3519E00], [3.6169E-02, -5.9025E00, -2.9313E00], [1.0766E-04, -7.7630E00, -5.1263E00], ] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Any = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : Tuple = self.default_image_processor _lowerCamelCase : Tuple = prepare_img() _lowerCamelCase : Optional[Any] = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : List[Any] = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : Optional[Any] = model(**__lowerCAmelCase ) # masks_queries_logits _lowerCamelCase : List[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCamelCase : int = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]] _lowerCamelCase : List[Any] = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) 
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits _lowerCamelCase : Dict = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCamelCase : Any = torch.tensor( [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : str = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : List[str] = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , ) _lowerCamelCase : Union[str, Any] = inputs['''pixel_values'''].to(__lowerCAmelCase ) _lowerCamelCase : Dict = [el.to(__lowerCAmelCase ) for el in inputs['''mask_labels''']] _lowerCamelCase : Optional[Any] = [el.to(__lowerCAmelCase ) for el in inputs['''class_labels''']] with torch.no_grad(): _lowerCamelCase : Tuple = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None )
72
0
"""Spark-backed dataset builder.

Builds a Hugging Face dataset from a ``pyspark.sql.DataFrame``: each Spark
partition is written out as an Arrow (or Parquet) shard, and examples can be
streamed partition-by-partition through ``SparkExamplesIterable``.
"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int


logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for datasets built from a Spark DataFrame."""

    # Optional explicit schema; the builder reads it back via `self.config.features`.
    features: Optional[datasets.Features] = None


def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    """Return a generator function yielding ``(key, example)`` pairs.

    Partitions are collected to the driver one at a time, in the order given by
    `partition_order`; keys have the form ``"<partition_id>_<row_id>"``.
    """
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    """Examples iterable that reads a Spark DataFrame partition by partition."""

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        """Return a copy that visits the partitions in a shuffled order."""
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        """Keep only the partitions assigned to `worker_id` out of `num_workers`."""
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        # One shard per Spark partition this iterable is responsible for.
        return len(self.partition_order)


class Spark(datasets.DatasetBuilder):
    """DatasetBuilder that materializes a Spark DataFrame into Arrow/Parquet shards."""

    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            # semanticHash uniquely identifies the DataFrame's logical plan,
            # so identical queries share a cache entry.
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        """On a multi-node cluster, check every worker can reach the cache_dir."""

        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an
            # error if directories already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in
            # which case it will not change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is
        # on an NFS accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        # A Spark DataFrame has no predefined splits: everything goes to "train".
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        """Repartition the DataFrame so each partition stays under `max_shard_size` bytes."""
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)

    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ):
        """Write shards on the workers; yield per-task (examples, bytes, shards, lengths) stats."""
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a
        # pickling error due to pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    # Current shard is full: finalize it and roll over to a new one.
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                # Move shards from the worker-local working dir to their final location.
                # NOTE(review): `file` from os.listdir is a bare basename — presumably resolved
                # against the worker's cwd; confirm against upstream behavior.
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        """Drive the distributed shard writing, then rename shards to their final names."""
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function,
            # which will result in a pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
141
"""Breadth-first search on an unweighted, undirected graph given as an
adjacency list: shortest path reconstruction and shortest path length."""
from collections import deque

demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Return the shortest path from `start` to `goal` as a list of nodes.

    Args:
        graph: adjacency list mapping each node to its neighbours.
        start: node to search from.
        goal: node to search for.

    Returns:
        The list of nodes on a shortest path (including both endpoints),
        ``[start]`` if start == goal, or ``[]`` if no path exists.
    """
    explored = set()
    # Queue of partial paths to extend; deque gives O(1) pops from the left
    # (the original list.pop(0) is O(n) per pop).
    queue = deque([[start]])

    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        path = queue.popleft()
        node = path[-1]
        if node not in explored:
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in graph[node]:
                new_path = [*path, neighbour]
                queue.append(new_path)
                # return path as soon as the goal is reached (BFS guarantees shortest)
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on a shortest path from `start` to `target`.

    Returns:
        The shortest distance, ``0`` if start == target, or ``-1`` if either
        node is missing from the graph or no path exists.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0

    queue = deque([start])
    # `{start}` (not `set(start)`) so multi-character node labels are one element.
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}

    while queue:
        node = queue.popleft()
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1

    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
72
0
'''simple docstring''' UpperCamelCase__ = '''0.21.0''' from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
181
"""simple docstring""" import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def snake_case_ ( A_ : Tuple, A_ : int, A_ : Dict ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = LxmertConfig.from_json_file(A_ ) print(F'''Building PyTorch model from configuration: {config}''' ) _lowerCamelCase : List[str] = LxmertForPreTraining(A_ ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(A_, A_, A_ ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict(), A_ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
72
0
"""Slow integration tests comparing XLM-RoBERTa hidden states against
reference values computed with the original fairseq implementation."""
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class a_(unittest.TestCase):
    # NOTE(review): class name left as-is to avoid breaking external references;
    # method names restored to unique `test_*` names — the originals were
    # duplicates (the second shadowed the first) and were never collected by unittest.

    @slow
    def test_xlm_roberta_base(self):
        """Check xlm-roberta-base last hidden state on a fixed sentence."""
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        # "The dog is cute and lives in the garden house", pre-tokenized to ids
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # Reference values produced with:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base'); xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        """Check xlm-roberta-large last hidden state on the same sentence."""
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        # "The dog is cute and lives in the garden house", pre-tokenized to ids
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        expected_output_shape = torch.Size((1, 12, 1_024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # Reference values produced with:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large'); xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
223
"""simple docstring""" from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def snake_case_ ( ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : int = 9, 14 # noqa: F841 _lowerCamelCase : List[Any] = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] _lowerCamelCase : Any = defaultdict(A_ ) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost] ) adjancency[nodea].append([nodea, cost] ) _lowerCamelCase : List[str] = mst(A_ ) _lowerCamelCase : Optional[int] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: _lowerCamelCase : int = tuple(answer[:2] ) _lowerCamelCase : int = tuple(edge[::-1] ) assert edge in result or reverse in result
72
0
"""simple docstring""" from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class _UpperCAmelCase ( _lowercase): __a : Dict = "Salesforce/blip-image-captioning-base" __a : Dict = ( "This is a tool that generates a description of an image. It takes an input named `image` which should be the " "image to caption, and returns a text that contains the description in English." ) __a : Tuple = "image_captioner" __a : Optional[Any] = AutoModelForVisionaSeq __a : List[str] = ["image"] __a : int = ["text"] def __init__( self , *_A , **_A ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ["""vision"""] ) super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) def __snake_case ( self , _A ) -> List[str]: '''simple docstring''' return self.pre_processor(images=__lowerCAmelCase , return_tensors="""pt""" ) def __snake_case ( self , _A ) -> Any: '''simple docstring''' return self.model.generate(**__lowerCAmelCase ) def __snake_case ( self , _A ) -> Union[str, Any]: '''simple docstring''' return self.pre_processor.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )[0].strip()
246
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all BART models at https://huggingface.co/models?filter=bart lowerCAmelCase__ = { '''vocab_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''', }, '''merges_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''facebook/bart-base''': 
'''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''', }, } lowerCAmelCase__ = { '''facebook/bart-base''': 1024, '''facebook/bart-large''': 1024, '''facebook/bart-large-mnli''': 1024, '''facebook/bart-large-cnn''': 1024, '''facebook/bart-large-xsum''': 1024, '''yjernite/bart_eli5''': 1024, } class __snake_case ( _lowercase): snake_case__ : Any = VOCAB_FILES_NAMES snake_case__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ : Optional[int] = ["input_ids", "attention_mask"] snake_case__ : Any = BartTokenizer def __init__( self : int , __lowerCAmelCase : Dict=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[Any]="replace" , __lowerCAmelCase : Any="<s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : str="</s>" , __lowerCAmelCase : Dict="<s>" , __lowerCAmelCase : Union[str, Any]="<unk>" , __lowerCAmelCase : Any="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=True , **__lowerCAmelCase : Optional[Any] , ): """simple docstring""" super().__init__( __lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , errors=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , 
pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase , **__lowerCAmelCase , ) _lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space: _lowerCamelCase : Dict = getattr(__lowerCAmelCase , pre_tok_state.pop('''type''' ) ) _lowerCamelCase : Any = add_prefix_space _lowerCamelCase : int = pre_tok_class(**__lowerCAmelCase ) _lowerCamelCase : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _lowerCamelCase : List[str] = '''post_processor''' _lowerCamelCase : List[str] = getattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase ) if tokenizer_component_instance: _lowerCamelCase : int = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowerCamelCase : Tuple = tuple(state['''sep'''] ) if "cls" in state: _lowerCamelCase : int = tuple(state['''cls'''] ) _lowerCamelCase : Union[str, Any] = False if state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space: _lowerCamelCase : Dict = add_prefix_space _lowerCamelCase : Optional[Any] = True if state.get('''trim_offsets''' , __lowerCAmelCase ) != trim_offsets: _lowerCamelCase : Any = trim_offsets _lowerCamelCase : str = True if changes_to_apply: _lowerCamelCase : List[str] = getattr(__lowerCAmelCase , state.pop('''type''' ) ) _lowerCamelCase : str = component_class(**__lowerCAmelCase ) setattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase ) @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE ( self : Any , 
__lowerCAmelCase : int ): """simple docstring""" _lowerCamelCase : Tuple = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else value _lowerCamelCase : str = value def SCREAMING_SNAKE_CASE ( self : int , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ): """simple docstring""" _lowerCamelCase : Dict = kwargs.get('''is_split_into_words''' , __lowerCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' '''to use it with pretokenized inputs.''' ) return super()._batch_encode_plus(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : List[Any] ): """simple docstring""" _lowerCamelCase : Any = kwargs.get('''is_split_into_words''' , __lowerCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' '''to use it with pretokenized inputs.''' ) return super()._encode_plus(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ): """simple docstring""" _lowerCamelCase : Tuple = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase ) return tuple(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=None ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ): """simple docstring""" _lowerCamelCase : List[str] = 
[self.sep_token_id] _lowerCamelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
72
0
def binomial_coefficient(n: int, k: int) -> int:
    """Return the binomial coefficient C(n, k).

    The previous revision was a SyntaxError (both parameters were named
    ``__a``) and its body referenced an undefined name ``A_``.
    """
    result = 1  # holds the running product
    # Since C(n, k) == C(n, n - k), use the smaller k for fewer iterations.
    if k > (n - k):
        k = n - k
    # Multiply/divide incrementally so intermediate values stay exact ints.
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the n-th Catalan number: C(2n, n) // (n + 1)."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    """Return n!; raise ValueError for negative n."""
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Number of labeled binary trees on *node_count* nodes
    (Catalan number times the number of node orderings)."""
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
10
"""simple docstring""" from __future__ import annotations def snake_case_ ( A_ : str ): '''simple docstring''' return [ord(A_ ) - 96 for elem in plain] def snake_case_ ( A_ : list[int] ): '''simple docstring''' return "".join(chr(elem + 96 ) for elem in encoded ) def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Dict = encode(input('''-> ''' ).strip().lower() ) print('''Encoded: ''', A_ ) print('''Decoded:''', decode(A_ ) ) if __name__ == "__main__": main()
72
0
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its 1-based alphabet position.

    The previous revision named every function ``lowerCamelCase_`` (each
    def overwrote the last) and the bodies referenced an undefined ``A_``.
    """
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of :func:`encode`: positions back to lowercase letters."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    """Read a line from stdin, print its encoded and decoded forms."""
    encoded = encode(input('-> ').strip().lower())
    print('Encoded: ', encoded)
    print('Decoded:', decode(encoded))


if __name__ == "__main__":
    main()
90
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = { '''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''], '''tokenization_lxmert''': ['''LxmertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''LxmertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''LxmertEncoder''', '''LxmertForPreTraining''', '''LxmertForQuestionAnswering''', '''LxmertModel''', '''LxmertPreTrainedModel''', '''LxmertVisualFeatureEncoder''', '''LxmertXLayer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLxmertForPreTraining''', '''TFLxmertMainLayer''', '''TFLxmertModel''', '''TFLxmertPreTrainedModel''', '''TFLxmertVisualFeatureEncoder''', ] if TYPE_CHECKING: from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_lxmert_fast import LxmertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_lxmert import ( 
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging a__: Optional[int] = logging.get_logger(__name__) a__: str = {'vocab_file': 'sentencepiece.bpe.model'} a__: Tuple = { 'vocab_file': { 'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model', 'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model', 'moussaKam/barthez-orangesum-title': ( 'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model' ), }, } a__: Optional[Any] = { 'moussaKam/mbarthez': 1_024, 'moussaKam/barthez': 1_024, 'moussaKam/barthez-orangesum-title': 1_024, } a__: Union[str, Any] = '▁' class SCREAMING_SNAKE_CASE__ ( _lowercase ): __SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"] def __init__( self,__lowerCamelCase,__lowerCamelCase="<s>",__lowerCamelCase="</s>",__lowerCamelCase="</s>",__lowerCamelCase="<s>",__lowerCamelCase="<unk>",__lowerCamelCase="<pad>",__lowerCamelCase="<mask>",__lowerCamelCase = None,**__lowerCamelCase,): A__ = AddedToken(__lowerCAmelCase,lstrip=__lowerCAmelCase,rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase,__lowerCAmelCase ) else mask_token A__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__lowerCAmelCase,eos_token=__lowerCAmelCase,unk_token=__lowerCAmelCase,sep_token=__lowerCAmelCase,cls_token=__lowerCAmelCase,pad_token=__lowerCAmelCase,mask_token=__lowerCAmelCase,sp_model_kwargs=self.sp_model_kwargs,**__lowerCAmelCase,) A__ = vocab_file A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__lowerCAmelCase ) ) A__ = {'''<s>''': 0, '''<pad>''': 1, 
'''</s>''': 2, '''<unk>''': 3} A__ = len(self.sp_model ) - 1 A__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A__ = [self.cls_token_id] A__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None,__lowerCamelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase,token_ids_a=__lowerCAmelCase,already_has_special_tokens=__lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCAmelCase )) + [1] return [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] + ([0] * len(__lowerCAmelCase )) + [1] def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None ): A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCamelCase ( self ): return len(self.sp_model ) def UpperCamelCase ( self ): A__ = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase ( self,__lowerCamelCase ): return self.sp_model.encode(__lowerCAmelCase,out_type=__lowerCAmelCase ) def UpperCamelCase ( self,__lowerCamelCase ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] A__ = self.sp_model.PieceToId(__lowerCAmelCase ) return spm_id if spm_id else self.unk_token_id def UpperCamelCase ( self,__lowerCamelCase ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(__lowerCAmelCase ) def UpperCamelCase ( self,__lowerCamelCase ): A__ = [] A__ = '''''' A__ = False for token in tokens: # make sure that special tokens are not decoded using 
sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__lowerCAmelCase ) + token A__ = True A__ = [] else: current_sub_tokens.append(__lowerCAmelCase ) A__ = False out_string += self.sp_model.decode(__lowerCAmelCase ) return out_string.strip() def __getstate__( self ): A__ = self.__dict__.copy() A__ = None return state def __setstate__( self,__lowerCamelCase ): A__ = d # for backward compatibility if not hasattr(self,'''sp_model_kwargs''' ): A__ = {} A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None ): if not os.path.isdir(__lowerCAmelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return A__ = os.path.join( __lowerCAmelCase,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file,__lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase,'''wb''' ) as fi: A__ = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (out_vocab_file,)
193
"""simple docstring""" def snake_case_ ( A_ : list[list[float]] ): '''simple docstring''' _lowerCamelCase : list[list[float]] = [] for data in source_data: for i, el in enumerate(A_ ): if len(A_ ) < i + 1: data_lists.append([] ) data_lists[i].append(float(A_ ) ) return data_lists def snake_case_ ( A_ : list[list[float]], A_ : list[int] ): '''simple docstring''' _lowerCamelCase : list[list[float]] = [] for dlist, weight in zip(A_, A_ ): _lowerCamelCase : Any = min(A_ ) _lowerCamelCase : Optional[Any] = max(A_ ) _lowerCamelCase : list[float] = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: _lowerCamelCase : str = F'''Invalid weight of {weight:f} provided''' raise ValueError(A_ ) score_lists.append(A_ ) return score_lists def snake_case_ ( A_ : list[list[float]] ): '''simple docstring''' _lowerCamelCase : list[float] = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(A_ ): _lowerCamelCase : List[str] = final_scores[j] + ele return final_scores def snake_case_ ( A_ : list[list[float]], A_ : list[int] ): '''simple docstring''' _lowerCamelCase : Tuple = get_data(A_ ) _lowerCamelCase : Optional[Any] = calculate_each_score(A_, A_ ) _lowerCamelCase : str = generate_final_scores(A_ ) # append scores to source data for i, ele in enumerate(A_ ): source_data[i].append(A_ ) return source_data
72
0
"""Unit tests for the Nystromformer model family.

Fixes vs. the previous revision: all three classes were named ``A_`` (the
later definitions overwrote the earlier ones), every tester method shared
one mangled name, and the 7-tuple returned by
``prepare_config_and_inputs`` was collapsed into a single variable while
the code still referenced the individual element names.
"""
import unittest

from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        NystromformerForMaskedLM,
        NystromformerForMultipleChoice,
        NystromformerForQuestionAnswering,
        NystromformerForSequenceClassification,
        NystromformerForTokenClassification,
        NystromformerModel,
    )
    from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


class NystromformerModelTester:
    """Builds tiny random configs/inputs and checks head output shapes."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels plus a matching small config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand each (batch, seq) tensor to (batch, num_choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained('uw-madison/nystromformer-512')
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = '''the [MASK] of Belgium is Brussels'''

        tokenizer = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512')
        model = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512')

        encoding = tokenizer(sentence, return_tensors='pt')

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), 'capital')
322
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __snake_case ( _lowercase): snake_case__ : List[str] = "unispeech" def __init__( self : List[str] , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : str=7_6_8 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=3_0_7_2 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Dict=1E-5 , __lowerCAmelCase : Optional[int]="group" , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __lowerCAmelCase : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase : Union[str, Any]=(1_0, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : List[str]=1_2_8 , __lowerCAmelCase : Any=1_6 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Union[str, Any]=0.05 , __lowerCAmelCase : Union[str, Any]=1_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Optional[int]=1_0 , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : List[str]=3_2_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Tuple=1_0_0 , __lowerCAmelCase : Dict=2_5_6 , __lowerCAmelCase : str=2_5_6 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict="mean" , __lowerCAmelCase : Union[str, Any]=False , 
__lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[Any]=2_5_6 , __lowerCAmelCase : Dict=8_0 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Any=0.5 , **__lowerCAmelCase : Optional[Any] , ): """simple docstring""" super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase ) _lowerCamelCase : Dict = hidden_size _lowerCamelCase : Any = feat_extract_norm _lowerCamelCase : List[Any] = feat_extract_activation _lowerCamelCase : Any = list(__lowerCAmelCase ) _lowerCamelCase : Tuple = list(__lowerCAmelCase ) _lowerCamelCase : int = list(__lowerCAmelCase ) _lowerCamelCase : List[str] = conv_bias _lowerCamelCase : List[str] = num_conv_pos_embeddings _lowerCamelCase : Tuple = num_conv_pos_embedding_groups _lowerCamelCase : List[str] = len(self.conv_dim ) _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : List[Any] = intermediate_size _lowerCamelCase : Dict = hidden_act _lowerCamelCase : Union[str, Any] = num_attention_heads _lowerCamelCase : Tuple = hidden_dropout _lowerCamelCase : List[Any] = attention_dropout _lowerCamelCase : Optional[int] = activation_dropout _lowerCamelCase : Optional[Any] = feat_proj_dropout _lowerCamelCase : Optional[int] = final_dropout _lowerCamelCase : Any = layerdrop _lowerCamelCase : Any = layer_norm_eps _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : List[str] = num_ctc_classes _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Optional[Any] = do_stable_layer_norm _lowerCamelCase : Tuple = use_weighted_layer_sum _lowerCamelCase : List[Any] = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCamelCase : Any = apply_spec_augment _lowerCamelCase : Dict = mask_time_prob _lowerCamelCase : List[str] = mask_time_length _lowerCamelCase : Optional[Any] = mask_time_min_masks _lowerCamelCase : List[str] = mask_feature_prob _lowerCamelCase : int = mask_feature_length _lowerCamelCase : Dict = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _lowerCamelCase : Optional[Any] = num_codevectors_per_group _lowerCamelCase : int = num_codevector_groups _lowerCamelCase : List[Any] = contrastive_logits_temperature _lowerCamelCase : List[str] = feat_quantizer_dropout _lowerCamelCase : Dict = num_negatives _lowerCamelCase : Optional[int] = codevector_dim _lowerCamelCase : List[Any] = proj_codevector_dim _lowerCamelCase : List[Any] = diversity_loss_weight # ctc loss _lowerCamelCase : Union[str, Any] = ctc_loss_reduction _lowerCamelCase : Any = ctc_zero_infinity # pretraining loss _lowerCamelCase : str = replace_prob @property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
72
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __UpperCamelCase : Any = { '''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''], '''tokenization_deberta''': ['''DebertaTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = ['''DebertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : str = [ '''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DebertaForMaskedLM''', '''DebertaForQuestionAnswering''', '''DebertaForSequenceClassification''', '''DebertaForTokenClassification''', '''DebertaModel''', '''DebertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = [ '''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDebertaForMaskedLM''', '''TFDebertaForQuestionAnswering''', '''TFDebertaForSequenceClassification''', '''TFDebertaForTokenClassification''', '''TFDebertaModel''', '''TFDebertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, 
DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys __UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
106
"""simple docstring""" from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def snake_case_ ( A_ : str, A_ : str, A_ : Optional[str] = None ): '''simple docstring''' if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release: # old versions of hfh don't url-encode the file path _lowerCamelCase : Optional[Any] = quote(A_ ) return hfh.hf_hub_url(A_, A_, repo_type='''dataset''', revision=A_ )
72
0
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" if not len(A_ ) == len(A_ ) == 3: raise ValueError("""Please enter a valid equation.""" ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError("""Both a & b of two equations can\'t be zero.""" ) # Extract the coefficients lowerCAmelCase__ : int = equationa lowerCAmelCase__ : Dict = equationa # Calculate the determinants of the matrices lowerCAmelCase__ : Any = aa * ba - aa * ba lowerCAmelCase__ : str = ca * ba - ca * ba lowerCAmelCase__ : Any = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError("""Infinite solutions. (Consistent system)""" ) else: raise ValueError("""No solution. (Inconsistent system)""" ) else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: lowerCAmelCase__ : str = determinant_x / determinant lowerCAmelCase__ : Optional[int] = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
37
"""simple docstring""" import unittest import numpy as np def snake_case_ ( A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray | None = None, ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = np.shape(A_ ) _lowerCamelCase : List[str] = np.shape(A_ ) _lowerCamelCase : List[str] = np.shape(A_ ) if shape_a[0] != shape_b[0]: _lowerCamelCase : Tuple = ( '''Expected the same number of rows for A and B. ''' F'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(A_ ) if shape_b[1] != shape_c[1]: _lowerCamelCase : Tuple = ( '''Expected the same number of columns for B and C. ''' F'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(A_ ) _lowerCamelCase : List[str] = pseudo_inv if a_inv is None: try: _lowerCamelCase : Any = np.linalg.inv(A_ ) except np.linalg.LinAlgError: raise ValueError( '''Input matrix A is not invertible. Cannot compute Schur complement.''' ) return mat_c - mat_b.T @ a_inv @ mat_b class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : List[str] = np.array([[2, 1], [6, 3]] ) _lowerCamelCase : List[Any] = schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Dict = np.block([[a, b], [b.T, c]] ) _lowerCamelCase : Tuple = np.linalg.det(__lowerCAmelCase ) _lowerCamelCase : List[str] = np.linalg.det(__lowerCAmelCase ) _lowerCamelCase : Any = np.linalg.det(__lowerCAmelCase ) self.assertAlmostEqual(__lowerCAmelCase , det_a * det_s ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : int = np.array([[2, 1], [6, 3]] ) with 
self.assertRaises(__lowerCAmelCase ): schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : Union[str, Any] = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(__lowerCAmelCase ): schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
72
0
from ....configuration_utils import PretrainedConfig from ....utils import logging _lowerCamelCase : List[str] = logging.get_logger(__name__) # TODO: upload to AWS _lowerCamelCase : List[str] = { """yjernite/retribert-base-uncased""": ( """https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json""" ), } class UpperCamelCase_ ( _lowercase ): '''simple docstring''' UpperCAmelCase__ = "retribert" def __init__( self : Optional[int] , UpperCAmelCase__ : str=30_522 , UpperCAmelCase__ : Tuple=768 , UpperCAmelCase__ : Union[str, Any]=8 , UpperCAmelCase__ : Any=12 , UpperCAmelCase__ : Optional[int]=3_072 , UpperCAmelCase__ : List[str]="gelu" , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : Tuple=512 , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : Optional[Any]=1e-12 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Any=128 , UpperCAmelCase__ : Optional[int]=0 , **UpperCAmelCase__ : str , ) ->Any: '''simple docstring''' super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase) A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_act A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = initializer_range A__ = layer_norm_eps A__ = share_encoders A__ = projection_dim
14
"""simple docstring""" def snake_case_ ( A_ : list[int], A_ : str ): '''simple docstring''' _lowerCamelCase : Tuple = int(A_ ) # Initialize Result _lowerCamelCase : Dict = [] # Traverse through all denomination for denomination in reversed(A_ ): # Find denominations while int(A_ ) >= int(A_ ): total_value -= int(A_ ) answer.append(A_ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": lowerCAmelCase__ = [] lowerCAmelCase__ = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): lowerCAmelCase__ = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(F"""Denomination {i}: """).strip())) lowerCAmelCase__ = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter lowerCAmelCase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2000] lowerCAmelCase__ = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(F"""Following is minimal change for {value}: """) lowerCAmelCase__ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
72
0
'''simple docstring''' def __UpperCamelCase ( lowercase__ : float, lowercase__ : float, lowercase__ : int ): '''simple docstring''' if principal <= 0: raise Exception('Principal borrowed must be > 0' ) if rate_per_annum < 0: raise Exception('Rate of interest must be >= 0' ) if years_to_repay <= 0 or not isinstance(A_, A_ ): raise Exception('Years to repay must be an integer > 0' ) # Yearly rate is divided by 12 to get monthly rate __lowercase =rate_per_annum / 12 # Years to repay is multiplied by 12 to get number of payments as payment is monthly __lowercase =years_to_repay * 12 return ( principal * rate_per_month * (1 + rate_per_month) ** number_of_payments / ((1 + rate_per_month) ** number_of_payments - 1) ) if __name__ == "__main__": import doctest doctest.testmod()
141
"""simple docstring""" def snake_case_ ( A_ : int = 2_00_00_00 ): '''simple docstring''' _lowerCamelCase : int = [0 for i in range(n + 1 )] _lowerCamelCase : List[str] = 1 _lowerCamelCase : Any = 1 for i in range(2, int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i, n + 1, A_ ): _lowerCamelCase : str = 1 _lowerCamelCase : Tuple = 0 for i in range(A_ ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(F"""{solution() = }""")
72
0
'''simple docstring''' UpperCamelCase__ = 6_5_5_2_1 def a__ ( lowerCAmelCase__ ) -> Any: UpperCAmelCase__ : Optional[int] = 1 UpperCAmelCase__ : Union[str, Any] = 0 for plain_chr in plain_text: UpperCAmelCase__ : Tuple = (a + ord(A_ )) % MOD_ADLER UpperCAmelCase__ : List[str] = (b + a) % MOD_ADLER return (b << 16) | a
181
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def snake_case_ ( A_ : Any ): '''simple docstring''' _lowerCamelCase : Any = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(A_, A_ ) def snake_case_ ( A_ : Union[str, Any] ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : Tuple = emb.weight.shape _lowerCamelCase : Dict = nn.Linear(A_, A_, bias=A_ ) _lowerCamelCase : str = emb.weight.data return lin_layer def snake_case_ ( A_ : str, A_ : Optional[int]="facebook/mbart-large-en-ro", A_ : Union[str, Any]=False, A_ : List[str]=False ): '''simple docstring''' _lowerCamelCase : Tuple = torch.load(A_, map_location='''cpu''' )['''model'''] remove_ignore_keys_(A_ ) _lowerCamelCase : int = state_dict['''encoder.embed_tokens.weight'''].shape[0] _lowerCamelCase : Any = MBartConfig.from_pretrained(A_, vocab_size=A_ ) if mbart_aa and finetuned: _lowerCamelCase : Any = '''relu''' _lowerCamelCase : Optional[int] = state_dict['''decoder.embed_tokens.weight'''] _lowerCamelCase : Any = MBartForConditionalGeneration(A_ ) model.model.load_state_dict(A_ ) if finetuned: _lowerCamelCase : str = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model 
is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
72
0
'''simple docstring''' import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor lowerCAmelCase : List[Any] =logging.get_logger(__name__) class a_ ( _lowercase ): def __init__( self : List[Any] , *lowercase : Dict , **lowercase : Tuple ): """simple docstring""" warnings.warn( "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use LayoutLMv2ImageProcessor instead." , __lowerCAmelCase , ) super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
223
"""simple docstring""" def snake_case_ ( A_ : list[list] ): '''simple docstring''' _lowerCamelCase : Optional[int] = current_set.copy() for row_index, row in enumerate(A_ ): _lowerCamelCase : Tuple = row[0] for column_index, column in enumerate(A_ ): if magnitude == 0: _lowerCamelCase : List[Any] = column continue _lowerCamelCase : List[Any] = column / magnitude # Subtract to cancel term _lowerCamelCase : Union[str, Any] = current_set[0] _lowerCamelCase : Dict = [first_row] _lowerCamelCase : str = current_set[1::] for row in current_set: _lowerCamelCase : Union[str, Any] = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(A_ ) continue for column_index in range(len(A_ ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(A_ ) # Create next recursion iteration set if len(final_set[0] ) != 3: _lowerCamelCase : Any = final_set[0] _lowerCamelCase : Any = [] _lowerCamelCase : Optional[int] = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) _lowerCamelCase : Dict = simplify(A_ ) for i in range(len(A_ ) ): resultant[i].insert(0, current_first_column[i] ) resultant.insert(0, A_ ) _lowerCamelCase : Tuple = resultant return final_set def snake_case_ ( A_ : list[list] ): '''simple docstring''' if len(A_ ) == 0: raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) _lowerCamelCase : Dict = len(A_ ) + 1 if any(len(A_ ) != _length for item in equations ): raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) for row in equations: if any(not isinstance(A_, (int, float) ) for column in row ): raise ValueError('''solve_simultaneous() requires lists of integers''' ) if len(A_ ) == 1: return [equations[0][-1] / equations[0][0]] _lowerCamelCase : Optional[Any] = equations.copy() if any(0 in row for row in data_set ): _lowerCamelCase : str = data_set.copy() _lowerCamelCase : List[Any] = [] for row_index, 
row in enumerate(A_ ): if 0 not in row: _lowerCamelCase : Union[str, Any] = data_set.pop(A_ ) break if not full_row: raise ValueError('''solve_simultaneous() requires at least 1 full equation''' ) data_set.insert(0, A_ ) _lowerCamelCase : List[str] = data_set.copy() _lowerCamelCase : int = simplify(A_ ) _lowerCamelCase : int = simplified[::-1] _lowerCamelCase : list = [] for row in simplified: _lowerCamelCase : Tuple = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue _lowerCamelCase : Optional[Any] = row.copy()[: len(A_ ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(A_ ) == 0: solutions.append(0 ) continue _lowerCamelCase : Tuple = temp_row[1::] _lowerCamelCase : Tuple = temp_row[::-1] for column_index, column in enumerate(A_ ): current_solution -= column * solutions[column_index] solutions.append(A_ ) _lowerCamelCase : Optional[int] = [] for item in solutions: final.append(float(round(A_, 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase__ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
72
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCamelCase__ : Optional[int] = { '''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''], '''tokenization_tapas''': ['''TapasTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : Union[str, Any] = [ '''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TapasForMaskedLM''', '''TapasForQuestionAnswering''', '''TapasForSequenceClassification''', '''TapasModel''', '''TapasPreTrainedModel''', '''load_tf_weights_in_tapas''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ : Dict = [ '''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFTapasForMaskedLM''', '''TFTapasForQuestionAnswering''', '''TFTapasForSequenceClassification''', '''TFTapasModel''', '''TFTapasPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys lowerCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
246
"""simple docstring""" import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class __snake_case ( _lowercase): snake_case__ : List[Any] = "Speech2TextFeatureExtractor" snake_case__ : Union[str, Any] = "Speech2TextTokenizer" def __init__( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" super().__init__(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : List[str] = self.feature_extractor _lowerCamelCase : str = False def __call__( self : List[Any] , *__lowerCAmelCase : int , **__lowerCAmelCase : List[str] ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) _lowerCamelCase : str = kwargs.pop('''raw_speech''' ) else: _lowerCamelCase : Tuple = kwargs.pop('''audio''' , __lowerCAmelCase ) _lowerCamelCase : Optional[Any] = kwargs.pop('''sampling_rate''' , __lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = kwargs.pop('''text''' , __lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: _lowerCamelCase : List[Any] = args[0] _lowerCamelCase : int = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: _lowerCamelCase : List[Any] = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase ) if text is not None: _lowerCamelCase : List[Any] = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase ) if text is None: return inputs elif audio is None: return encodings else: _lowerCamelCase : List[str] = encodings['''input_ids'''] return inputs def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Tuple ): """simple docstring""" return 
self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : int ): """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @contextmanager def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) _lowerCamelCase : Union[str, Any] = True _lowerCamelCase : Any = self.tokenizer yield _lowerCamelCase : List[str] = self.feature_extractor _lowerCamelCase : Tuple = False
72
0
"""Text classification pipeline for sequence-classification models."""
import warnings
from typing import Dict

import numpy as np

from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


def sigmoid(_outputs):
    """Element-wise logistic sigmoid."""
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    """Numerically stable softmax over the last axis."""
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    """Post-processing functions that can be applied to the model logits."""

    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    R"\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `\"sigmoid\"`: Applies the sigmoid function on the output.\n - `\"softmax\"`: Applies the softmax function on the output.\n - `\"none\"`: Does not apply any function on the output.\n ",
)
class TextClassificationPipeline(Pipeline):
    """Classify text with any sequence-classification model."""

    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # `top_k` defaults to "" (not None) so "unset" can be told apart from an explicit None.
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        """Run classification; in legacy mode a single input returns a one-element list."""
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair."
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # Choose a default activation from the model config when none was requested.
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(F"""Unrecognized `function_to_apply` argument: {function_to_apply}""")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
10
"""Lazy import structure for the Reformer model."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public names it exports; extended below per optional dependency.
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
72
0
from math import factorial __A = {str(d): factorial(d) for d in range(10)} def lowerCamelCase_ ( UpperCamelCase__ : int ) -> str: """simple docstring""" return sum(DIGIT_FACTORIAL[d] for d in str(A_ ) ) def lowerCamelCase_ ( ) -> Any: """simple docstring""" __lowerCamelCase = 7 * factorial(9 ) + 1 return sum(i for i in range(3 , A_ ) if sum_of_digit_factorial(A_ ) == i ) if __name__ == "__main__": print(f'''{solution() = }''')
90
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = { '''configuration_autoformer''': [ '''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AutoformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''AutoformerForPrediction''', '''AutoformerModel''', '''AutoformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
0
from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def UpperCamelCase__( UpperCamelCase__ : str , UpperCamelCase__ : float | Decimal , UpperCamelCase__ : float = 10**-10 )->Optional[Any]: A__ = a while True: A__ = Decimal(A_ ) - ( Decimal(eval(A_ ) ) / Decimal(eval(str(diff(A_ ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(A_ ) ) < precision: # noqa: S307 return float(A_ ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}") # Find root of polynomial print(F"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}") # Find Square Root of 5 print(F"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}") # Exponential Roots print(F"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
193
"""simple docstring""" import math def snake_case_ ( A_ : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5, int(math.sqrt(A_ ) + 1 ), 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def snake_case_ ( A_ : float = 0.1 ): '''simple docstring''' _lowerCamelCase : Optional[int] = 3 _lowerCamelCase : List[str] = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1 ): primes += is_prime(A_ ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
72
0
"""Tests for the legacy seq2seq example datasets, samplers and packing utilities."""
import os
from pathlib import Path

import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader

from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset


BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"


def _dump_articles(path: Path, articles: list):
    """Write one article per line to ``path``."""
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    """Populate ``tmp_dir`` with tiny train/val/test source and target files."""
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir


class A_(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = SeqaSeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeqaSeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch

    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")

    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)

    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = SeqaSeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
"""Tests for TextStreamer / TextIteratorStreamer used during generation."""
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class __snake_case(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        # Disable EOS so generation always runs for max_new_tokens steps.
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        # Only the newly generated tokens should be streamed when skip_prompt=True.
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
"""Deprecated processor for M-CTC-T: bundles a feature extractor and a tokenizer."""
import warnings
from contextlib import contextmanager

from ....processing_utils import ProcessorMixin


class SCREAMING_SNAKE_CASE(ProcessorMixin):
    """Wraps an ``MCTCTFeatureExtractor`` and a tokenizer into a single processor."""

    # Attribute names ProcessorMixin uses to resolve the two sub-components.
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Which component `__call__`/`pad` forward to while `as_target_processor` is active.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Dispatch `audio` to the feature extractor and/or `text` to the tokenizer."""
        # Legacy `as_target_processor` path: forward everything to the active processor.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            # Tokenized text becomes the training labels for the audio features.
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        """Pad `input_features` with the feature extractor and/or `labels` with the tokenizer."""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop('input_features', None)
        labels = kwargs.pop('labels', None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily route calls to the tokenizer to process labels."""
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.'
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
106
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase__ = logging.get_logger(__name__) # TODO: upload to AWS lowerCAmelCase__ = { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json''' ), } class __snake_case ( _lowercase): snake_case__ : int = "retribert" def __init__( self : Optional[int] , __lowerCAmelCase : str=3_0_5_2_2 , __lowerCAmelCase : Tuple=7_6_8 , __lowerCAmelCase : Union[str, Any]=8 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=3_0_7_2 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=5_1_2 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Optional[Any]=1E-12 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any=1_2_8 , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : str , ): """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) _lowerCamelCase : Dict = vocab_size _lowerCamelCase : Union[str, Any] = hidden_size _lowerCamelCase : Dict = num_hidden_layers _lowerCamelCase : int = num_attention_heads _lowerCamelCase : int = hidden_act _lowerCamelCase : str = intermediate_size _lowerCamelCase : Union[str, Any] = hidden_dropout_prob _lowerCamelCase : List[Any] = attention_probs_dropout_prob _lowerCamelCase : Optional[int] = max_position_embeddings _lowerCamelCase : List[Any] = type_vocab_size _lowerCamelCase : Any = initializer_range _lowerCamelCase : Optional[int] = layer_norm_eps _lowerCamelCase : int = share_encoders _lowerCamelCase : Optional[Any] = projection_dim
72
0
"""RoBERTa model configuration."""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    """Configuration for a RoBERTa model; defaults match the `roberta-base`
    architecture."""

    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    """ONNX export configuration for RoBERTa."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # multiple-choice inputs carry an extra "choice" axis between batch and sequence
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
37
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 
'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : Optional[int] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : str = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] _lowerCamelCase : Optional[Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : Tuple = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] _lowerCamelCase : Any = '''fp16''' 
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] _lowerCamelCase : str = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] _lowerCamelCase : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : int = '''fp16''' self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
72
0
from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    """Return the Newton forward-difference coefficient u*(u-1)*...*(u-p+1).

    This is the falling-factorial product used for the i-th term of
    Newton's forward interpolation formula (p = term index; p <= 1 yields u).
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    """Interactively run Newton's forward interpolation on user-supplied data."""
    n = int(input("enter the numbers of values: "))
    # n x n forward-difference table; column 0 will hold the sampled y values
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        # obfuscated original dropped this assignment; sampled values go in column 0
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    # normalized offset of `value` from x[0], assuming equally spaced x
    u = (value - x[0]) / (x[1] - x[0])

    # build the forward-difference table column by column
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    # accumulate the Newton forward series along the top row of the table
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"""the value at {value} is {summ}""")


if __name__ == "__main__":
    main()
14
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class __snake_case : def __init__( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]=1_0 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=3_2 * 4 , __lowerCAmelCase : Dict=3_2 * 6 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[str]=3_2 , ): """simple docstring""" _lowerCamelCase : List[str] = parent _lowerCamelCase : str = batch_size _lowerCamelCase : Dict = is_training _lowerCamelCase : str = use_auxiliary_loss _lowerCamelCase : Any = num_queries _lowerCamelCase : List[Any] = num_channels _lowerCamelCase : int = min_size _lowerCamelCase : Any = max_size _lowerCamelCase : int = num_labels _lowerCamelCase : List[str] = mask_feature_size def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __lowerCAmelCase ) _lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, 
self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5 ).float() _lowerCamelCase : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long() _lowerCamelCase : Dict = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = self.prepare_config_and_inputs() _lowerCamelCase : List[str] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ): """simple docstring""" _lowerCamelCase : List[str] = output.encoder_hidden_states _lowerCamelCase : Tuple = output.pixel_decoder_hidden_states _lowerCamelCase : Dict = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers ) def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=False ): """simple docstring""" with torch.no_grad(): _lowerCamelCase : Optional[int] = MaskFormerModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() 
_lowerCamelCase : Tuple = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) _lowerCamelCase : List[str] = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" _lowerCamelCase : str = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() def comm_check_on_output(__lowerCAmelCase : Dict ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _lowerCamelCase : str = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ) 
comm_check_on_output(__lowerCAmelCase ) _lowerCamelCase : List[str] = model( pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __snake_case ( _lowercase , _lowercase , unittest.TestCase): snake_case__ : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () snake_case__ : Any = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) snake_case__ : List[str] = False snake_case__ : List[str] = False snake_case__ : Optional[int] = False snake_case__ : Dict = False def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Optional[int] = MaskFormerModelTester(self ) _lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple 
docstring""" pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : str = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Dict = [*signature.parameters.keys()] _lowerCamelCase : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" for model_name in ["facebook/maskformer-swin-small-coco"]: _lowerCamelCase : Union[str, Any] = MaskFormerModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = (self.model_tester.min_size,) * 2 _lowerCamelCase : Union[str, Any] = { '''pixel_values''': torch.randn((2, 3, *size) , device=__lowerCAmelCase ), '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowerCAmelCase ), '''class_labels''': torch.zeros(2 , 1_0 , device=__lowerCAmelCase ).long(), } _lowerCamelCase : int = MaskFormerForInstanceSegmentation(MaskFormerConfig() 
).to(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ).to(__lowerCAmelCase ) _lowerCamelCase : List[str] = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase ) self.assertTrue(outputs.attentions is not None ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _lowerCamelCase : Union[str, Any] = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : Any = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() _lowerCamelCase : List[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : int = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[int] = True _lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , 
mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) _lowerCamelCase : List[Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCamelCase : Tuple = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _lowerCamelCase : List[str] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCamelCase : Optional[int] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__lowerCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) lowerCAmelCase__ = 1E-4 def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class __snake_case ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : str = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowerCAmelCase ) _lowerCamelCase : Any = self.default_image_processor _lowerCamelCase : List[Any] = prepare_img() _lowerCamelCase : Any = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : Any = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : int = model(**__lowerCAmelCase ) 
_lowerCamelCase : str = torch.tensor( [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) _lowerCamelCase : Union[str, Any] = torch.tensor( [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) _lowerCamelCase : Optional[int] = torch.tensor( [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[int] = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : str = prepare_img() _lowerCamelCase : int = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : str = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : Optional[int] = model(**__lowerCAmelCase ) # masks_queries_logits _lowerCamelCase : Optional[int] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCamelCase : List[str] = [ [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33], [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95], 
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42], ] _lowerCamelCase : Any = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits _lowerCamelCase : List[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCamelCase : str = torch.tensor( [ [1.6512E00, -5.2572E00, -3.3519E00], [3.6169E-02, -5.9025E00, -2.9313E00], [1.0766E-04, -7.7630E00, -5.1263E00], ] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Any = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : Tuple = self.default_image_processor _lowerCamelCase : Tuple = prepare_img() _lowerCamelCase : Optional[Any] = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : List[Any] = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : Optional[Any] = model(**__lowerCAmelCase ) # masks_queries_logits _lowerCamelCase : List[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCamelCase : int = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]] _lowerCamelCase : List[Any] = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) 
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits _lowerCamelCase : Dict = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCamelCase : Any = torch.tensor( [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : str = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : List[str] = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , ) _lowerCamelCase : Union[str, Any] = inputs['''pixel_values'''].to(__lowerCAmelCase ) _lowerCamelCase : Dict = [el.to(__lowerCAmelCase ) for el in inputs['''mask_labels''']] _lowerCamelCase : Optional[Any] = [el.to(__lowerCAmelCase ) for el in inputs['''class_labels''']] with torch.no_grad(): _lowerCamelCase : Tuple = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None )
72
0
"""Depth estimation pipeline: predicts a per-pixel depth map for an image."""

from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """Monocular depth-estimation pipeline.

    Returns a dict with the raw ``predicted_depth`` tensor and a ``depth``
    PIL image rescaled to 0-255 at the original image resolution.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # no pipeline-specific parameters: empty kwargs for all three stages
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        # remember the original (width, height) so postprocess can upsample back;
        # the obfuscated original dropped the `self.` and lost this state
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        # PIL size is (width, height); interpolate expects (height, width), hence [::-1]
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
141
"""simple docstring""" lowerCAmelCase__ = { '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } def snake_case_ ( A_ : dict, A_ : int, A_ : int ): '''simple docstring''' _lowerCamelCase : List[str] = set() # keep track of all the paths to be checked _lowerCamelCase : str = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue _lowerCamelCase : str = queue.pop(0 ) # get the last node from the path _lowerCamelCase : List[Any] = path[-1] if node not in explored: _lowerCamelCase : Union[str, Any] = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: _lowerCamelCase : Union[str, Any] = list(A_ ) new_path.append(A_ ) queue.append(A_ ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(A_ ) # in case there's no path between the 2 nodes return [] def snake_case_ ( A_ : dict, A_ : int, A_ : Dict ): '''simple docstring''' if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 _lowerCamelCase : Optional[int] = [start] _lowerCamelCase : int = set(A_ ) # Keep tab on distances from `start` node. 
_lowerCamelCase : int = {start: 0, target: -1} while queue: _lowerCamelCase : Optional[Any] = queue.pop(0 ) if node == target: _lowerCamelCase : Any = ( dist[node] if dist[target] == -1 else min(dist[target], dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(A_ ) queue.append(A_ ) _lowerCamelCase : Any = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
72
0
"""Testing suite for the PyTorch MobileViTV2 model."""

import inspect
import unittest

from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
    from transformers.models.mobilevitva.modeling_mobilevitva import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        # MobileViTV2 configs do not share the usual hidden_size/num_layers
        # attributes; the one property common to all of them is the width
        # multiplier.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTVaModelTester:
    """Builds tiny configs and random inputs for fast forward-pass tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # Channel count of the last stage scales with the width multiplier.
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        # BUG FIX: previously read self.ffn_dropout_prob / self.attn_dropout_prob,
        # attributes that were never assigned in __init__ (AttributeError at
        # runtime); the stored attributes are ffn_dropout / attn_dropout.
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test harness for the MobileViTV2 model family."""

    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO cats test image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
181
"""simple docstring""" import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def snake_case_ ( A_ : Tuple, A_ : int, A_ : Dict ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = LxmertConfig.from_json_file(A_ ) print(F'''Building PyTorch model from configuration: {config}''' ) _lowerCamelCase : List[str] = LxmertForPreTraining(A_ ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(A_, A_, A_ ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict(), A_ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
72
0
"""Image processor for video models: resize / center-crop / rescale / normalize frames."""

from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    """Normalise *videos* to a list of videos, each of which is a list of frames."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        # Already a batch of videos.
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        # A single video (list of frames) -> batch of one.
        return [videos]

    elif is_valid_image(videos):
        # A single frame -> batch of one single-frame video.
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")


class VideoMAEImageProcessor(BaseImageProcessor):
    """Preprocesses batches of video frames into model-ready pixel values."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize a frame either to a fixed (height, width) or by shortest edge."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop a frame to ``size['height']`` x ``size['width']``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by *scale* (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize a frame with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the configured transforms to a single frame."""
        # BUG FIX: the original condition was written
        # `do_resize and size is None or resample is None`, which parses as
        # `(do_resize and size is None) or (resample is None)` because `and`
        # binds tighter than `or`, raising even when do_resize is False.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one or more videos into a ``BatchFeature`` of pixel values.

        Per-call arguments override the processor's configured defaults.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
223
"""simple docstring""" from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def snake_case_ ( ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : int = 9, 14 # noqa: F841 _lowerCamelCase : List[Any] = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] _lowerCamelCase : Any = defaultdict(A_ ) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost] ) adjancency[nodea].append([nodea, cost] ) _lowerCamelCase : List[str] = mst(A_ ) _lowerCamelCase : Optional[int] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: _lowerCamelCase : int = tuple(answer[:2] ) _lowerCamelCase : int = tuple(edge[::-1] ) assert edge in result or reverse in result
72
0
"""simple docstring""" from math import factorial, radians def UpperCamelCase ( _lowerCAmelCase : float, _lowerCAmelCase : int = 18, _lowerCAmelCase : int = 10 ) -> List[str]: _UpperCAmelCase : Any = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0) # Converting from degrees to radians _UpperCAmelCase : Tuple = radians(A_ ) _UpperCAmelCase : int = angle_in_radians _UpperCAmelCase : Any = 3 _UpperCAmelCase : str = -1 for _ in range(A_ ): result += (b * (angle_in_radians**a)) / factorial(A_ ) _UpperCAmelCase : Optional[Any] = -b # One positive term and the next will be negative and so on... a += 2 # Increased by 2 for every term. return round(A_, A_ ) if __name__ == "__main__": __import__('''doctest''').testmod()
246
"""Fast (Rust-backed) tokenizer for BART."""

import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


class BartTokenizerFast(PreTrainedTokenizerFast):
    """GPT-2-style byte-level BPE tokenizer for BART, backed by HF *tokenizers*."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Keep the backend pre-tokenizer in sync with the requested
        # add_prefix_space setting.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token, or None (with an error log) if it was never set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word: it includes the space before it
        # (lstrip=True), so "<mask>" matches " <mask>" in text.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend model's vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap sequences as <s> A </s> or <s> A </s></s> B </s>."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BART does not use token type ids; return a zero mask of full length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
72
0
from __future__ import annotations

from collections.abc import Iterator


class Node:
    """A binary-tree node holding an integer value and optional children."""

    def __init__(self, value: int) -> None:
        self.value = value          # payload of this node
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """Iterable that yields the sum of all node values in a binary tree.

    Iterating the object produces exactly one item: the total of the tree.
    """

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        """Return the sum of ``node`` and all of its descendants (0 for None)."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
10
"""simple docstring""" from __future__ import annotations def snake_case_ ( A_ : str ): '''simple docstring''' return [ord(A_ ) - 96 for elem in plain] def snake_case_ ( A_ : list[int] ): '''simple docstring''' return "".join(chr(elem + 96 ) for elem in encoded ) def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Dict = encode(input('''-> ''' ).strip().lower() ) print('''Encoded: ''', A_ ) print('''Decoded:''', decode(A_ ) ) if __name__ == "__main__": main()
72
0
from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, 
is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
90
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = { '''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''], '''tokenization_lxmert''': ['''LxmertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''LxmertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''LxmertEncoder''', '''LxmertForPreTraining''', '''LxmertForQuestionAnswering''', '''LxmertModel''', '''LxmertPreTrainedModel''', '''LxmertVisualFeatureEncoder''', '''LxmertXLayer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLxmertForPreTraining''', '''TFLxmertMainLayer''', '''TFLxmertModel''', '''TFLxmertPreTrainedModel''', '''TFLxmertVisualFeatureEncoder''', ] if TYPE_CHECKING: from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_lxmert_fast import LxmertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_lxmert import ( 
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
0
"""Fetch inspirational quotes from the zenquotes.io public API."""

import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    """Return the quote of the day as parsed JSON (a list of quote dicts)."""
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    """Return a random quote as parsed JSON (a list of quote dicts)."""
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
193
"""simple docstring""" def snake_case_ ( A_ : list[list[float]] ): '''simple docstring''' _lowerCamelCase : list[list[float]] = [] for data in source_data: for i, el in enumerate(A_ ): if len(A_ ) < i + 1: data_lists.append([] ) data_lists[i].append(float(A_ ) ) return data_lists def snake_case_ ( A_ : list[list[float]], A_ : list[int] ): '''simple docstring''' _lowerCamelCase : list[list[float]] = [] for dlist, weight in zip(A_, A_ ): _lowerCamelCase : Any = min(A_ ) _lowerCamelCase : Optional[Any] = max(A_ ) _lowerCamelCase : list[float] = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: _lowerCamelCase : str = F'''Invalid weight of {weight:f} provided''' raise ValueError(A_ ) score_lists.append(A_ ) return score_lists def snake_case_ ( A_ : list[list[float]] ): '''simple docstring''' _lowerCamelCase : list[float] = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(A_ ): _lowerCamelCase : List[str] = final_scores[j] + ele return final_scores def snake_case_ ( A_ : list[list[float]], A_ : list[int] ): '''simple docstring''' _lowerCamelCase : Tuple = get_data(A_ ) _lowerCamelCase : Optional[Any] = calculate_each_score(A_, A_ ) _lowerCamelCase : str = generate_final_scores(A_ ) # append scores to source data for i, ele in enumerate(A_ ): source_data[i].append(A_ ) return source_data
72
0
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel

from ...models.attention import BasicTransformerBlock
from ...utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    """CLIP vision encoder + mapper producing conditioning embeddings of
    dimension ``proj_size`` for the Paint-by-Example pipeline."""

    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling: learned embedding used as the unconditional input
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        """Encode images to latent conditioning states.

        Returns the projected states, optionally paired with the learned
        unconditional vector.
        """
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        # insert a sequence dimension before mapping: (batch, 1, hidden)
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    """Stack of transformer blocks mapping CLIP pooled output to conditioning states."""

    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        # NOTE(review): per-head dim and attention_bias reconstructed from
        # upstream diffusers source — confirm against the pinned version.
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True
                )
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
322
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __snake_case ( _lowercase): snake_case__ : List[str] = "unispeech" def __init__( self : List[str] , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : str=7_6_8 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=3_0_7_2 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Dict=1E-5 , __lowerCAmelCase : Optional[int]="group" , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __lowerCAmelCase : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase : Union[str, Any]=(1_0, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : List[str]=1_2_8 , __lowerCAmelCase : Any=1_6 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Union[str, Any]=0.05 , __lowerCAmelCase : Union[str, Any]=1_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Optional[int]=1_0 , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : List[str]=3_2_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Tuple=1_0_0 , __lowerCAmelCase : Dict=2_5_6 , __lowerCAmelCase : str=2_5_6 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict="mean" , __lowerCAmelCase : Union[str, Any]=False , 
__lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[Any]=2_5_6 , __lowerCAmelCase : Dict=8_0 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Any=0.5 , **__lowerCAmelCase : Optional[Any] , ): """simple docstring""" super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase ) _lowerCamelCase : Dict = hidden_size _lowerCamelCase : Any = feat_extract_norm _lowerCamelCase : List[Any] = feat_extract_activation _lowerCamelCase : Any = list(__lowerCAmelCase ) _lowerCamelCase : Tuple = list(__lowerCAmelCase ) _lowerCamelCase : int = list(__lowerCAmelCase ) _lowerCamelCase : List[str] = conv_bias _lowerCamelCase : List[str] = num_conv_pos_embeddings _lowerCamelCase : Tuple = num_conv_pos_embedding_groups _lowerCamelCase : List[str] = len(self.conv_dim ) _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : List[Any] = intermediate_size _lowerCamelCase : Dict = hidden_act _lowerCamelCase : Union[str, Any] = num_attention_heads _lowerCamelCase : Tuple = hidden_dropout _lowerCamelCase : List[Any] = attention_dropout _lowerCamelCase : Optional[int] = activation_dropout _lowerCamelCase : Optional[Any] = feat_proj_dropout _lowerCamelCase : Optional[int] = final_dropout _lowerCamelCase : Any = layerdrop _lowerCamelCase : Any = layer_norm_eps _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : List[str] = num_ctc_classes _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Optional[Any] = do_stable_layer_norm _lowerCamelCase : Tuple = use_weighted_layer_sum _lowerCamelCase : List[Any] = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCamelCase : Any = apply_spec_augment _lowerCamelCase : Dict = mask_time_prob _lowerCamelCase : List[str] = mask_time_length _lowerCamelCase : Optional[Any] = mask_time_min_masks _lowerCamelCase : List[str] = mask_feature_prob _lowerCamelCase : int = mask_feature_length _lowerCamelCase : Dict = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _lowerCamelCase : Optional[Any] = num_codevectors_per_group _lowerCamelCase : int = num_codevector_groups _lowerCamelCase : List[Any] = contrastive_logits_temperature _lowerCamelCase : List[str] = feat_quantizer_dropout _lowerCamelCase : Dict = num_negatives _lowerCamelCase : Optional[int] = codevector_dim _lowerCamelCase : List[Any] = proj_codevector_dim _lowerCamelCase : List[Any] = diversity_loss_weight # ctc loss _lowerCamelCase : Union[str, Any] = ctc_loss_reduction _lowerCamelCase : Any = ctc_zero_infinity # pretraining loss _lowerCamelCase : str = replace_prob @property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
72
0
"""simple docstring""" import os import string import sys __UpperCamelCase : Optional[Any] = 1 << 8 __UpperCamelCase : Dict = { '''tab''': ord('''\t'''), '''newline''': ord('''\r'''), '''esc''': 2_7, '''up''': 6_5 + ARROW_KEY_FLAG, '''down''': 6_6 + ARROW_KEY_FLAG, '''right''': 6_7 + ARROW_KEY_FLAG, '''left''': 6_8 + ARROW_KEY_FLAG, '''mod_int''': 9_1, '''undefined''': sys.maxsize, '''interrupt''': 3, '''insert''': 5_0, '''delete''': 5_1, '''pg_up''': 5_3, '''pg_down''': 5_4, } __UpperCamelCase : Tuple = KEYMAP['''up'''] __UpperCamelCase : List[str] = KEYMAP['''left'''] if sys.platform == "win32": __UpperCamelCase : Optional[int] = [] __UpperCamelCase : Optional[int] = { b'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG, b'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG, b'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG, b'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG, b'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG, b'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG, b'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG, b'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG, } for i in range(1_0): __UpperCamelCase : List[str] = ord(str(i)) def __SCREAMING_SNAKE_CASE ( ): if os.name == "nt": import msvcrt lowerCAmelCase__ : str = '''mbcs''' # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(A_ ) == 0: # Read the keystroke lowerCAmelCase__ : Optional[Any] = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): lowerCAmelCase__ : Optional[int] = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: lowerCAmelCase__ : str = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int'''] ) ) WIN_CH_BUFFER.append(A_ ) if ord(A_ ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(1_26 ) ) lowerCAmelCase__ : str = chr(KEYMAP['''esc'''] ) except KeyError: lowerCAmelCase__ : List[Any] = cha[1] else: 
lowerCAmelCase__ : int = ch.decode(A_ ) else: lowerCAmelCase__ : Optional[Any] = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": import termios import tty lowerCAmelCase__ : Optional[Any] = sys.stdin.fileno() lowerCAmelCase__ : List[Any] = termios.tcgetattr(A_ ) try: tty.setraw(A_ ) lowerCAmelCase__ : Tuple = sys.stdin.read(1 ) finally: termios.tcsetattr(A_ , termios.TCSADRAIN , A_ ) return ch def __SCREAMING_SNAKE_CASE ( ): lowerCAmelCase__ : Tuple = get_raw_chars() if ord(A_ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(A_ ) == KEYMAP["esc"]: lowerCAmelCase__ : Optional[int] = get_raw_chars() if ord(A_ ) == KEYMAP["mod_int"]: lowerCAmelCase__ : Dict = get_raw_chars() if ord(A_ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(A_ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(A_ ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
106
"""simple docstring""" from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def snake_case_ ( A_ : str, A_ : str, A_ : Optional[str] = None ): '''simple docstring''' if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release: # old versions of hfh don't url-encode the file path _lowerCamelCase : Optional[Any] = quote(A_ ) return hfh.hf_hub_url(A_, A_, repo_type='''dataset''', revision=A_ )
72
0
"""Query the Open Library API for book metadata by ISBN."""

from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Fetch the JSON record for an Open Library olid (e.g. ``isbn/...``).

    Raises:
        ValueError: if ``olid`` is not of the form ``<kind>/<id>``.
    """
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Reduce a raw Open Library record to a human-friendly summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    # Resolve author references to names (one extra API call per author).
    data["Authors"] = [get_openlibrary_data(author["key"])["name"] for author in data["Authors"]]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
37
"""simple docstring""" import unittest import numpy as np def snake_case_ ( A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray | None = None, ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = np.shape(A_ ) _lowerCamelCase : List[str] = np.shape(A_ ) _lowerCamelCase : List[str] = np.shape(A_ ) if shape_a[0] != shape_b[0]: _lowerCamelCase : Tuple = ( '''Expected the same number of rows for A and B. ''' F'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(A_ ) if shape_b[1] != shape_c[1]: _lowerCamelCase : Tuple = ( '''Expected the same number of columns for B and C. ''' F'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(A_ ) _lowerCamelCase : List[str] = pseudo_inv if a_inv is None: try: _lowerCamelCase : Any = np.linalg.inv(A_ ) except np.linalg.LinAlgError: raise ValueError( '''Input matrix A is not invertible. Cannot compute Schur complement.''' ) return mat_c - mat_b.T @ a_inv @ mat_b class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : List[str] = np.array([[2, 1], [6, 3]] ) _lowerCamelCase : List[Any] = schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Dict = np.block([[a, b], [b.T, c]] ) _lowerCamelCase : Tuple = np.linalg.det(__lowerCAmelCase ) _lowerCamelCase : List[str] = np.linalg.det(__lowerCAmelCase ) _lowerCamelCase : Any = np.linalg.det(__lowerCAmelCase ) self.assertAlmostEqual(__lowerCAmelCase , det_a * det_s ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : int = np.array([[2, 1], [6, 3]] ) with 
self.assertRaises(__lowerCAmelCase ): schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : Union[str, Any] = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(__lowerCAmelCase ): schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
72
0
"""EfficientNet model configuration."""

from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    """Configuration for an EfficientNet model.

    Defaults correspond to the B7 variant. NOTE(review): signature
    reconstructed from the upstream source — confirm against the pinned
    transformers version.
    """

    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # each block repeat expands into 4 hidden layers
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    """ONNX export configuration for EfficientNet."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
14
"""simple docstring""" def snake_case_ ( A_ : list[int], A_ : str ): '''simple docstring''' _lowerCamelCase : Tuple = int(A_ ) # Initialize Result _lowerCamelCase : Dict = [] # Traverse through all denomination for denomination in reversed(A_ ): # Find denominations while int(A_ ) >= int(A_ ): total_value -= int(A_ ) answer.append(A_ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": lowerCAmelCase__ = [] lowerCAmelCase__ = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): lowerCAmelCase__ = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(F"""Denomination {i}: """).strip())) lowerCAmelCase__ = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter lowerCAmelCase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2000] lowerCAmelCase__ = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(F"""Following is minimal change for {value}: """) lowerCAmelCase__ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
72
0
"""Tests for transformers.convert_graph_to_onnx: export, quantization,
dynamic-axis inference and input-ordering helpers."""

import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


class FuncContiguousArgs:
    """Stub whose forward() takes the generated inputs contiguously."""

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    """Stub whose forward() interleaves a non-generated arg among the inputs."""

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        """Export ``model`` to ONNX in a temp dir, returning the output path."""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()

                # Export
                convert(framework, model, path, opset, tokenizer, **model_kwargs)

                return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(ordered_input_names), 1)
        self.assertEqual(len(inputs_args), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
141
"""simple docstring""" def snake_case_ ( A_ : int = 2_00_00_00 ): '''simple docstring''' _lowerCamelCase : int = [0 for i in range(n + 1 )] _lowerCamelCase : List[str] = 1 _lowerCamelCase : Any = 1 for i in range(2, int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i, n + 1, A_ ): _lowerCamelCase : str = 1 _lowerCamelCase : Tuple = 0 for i in range(A_ ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(F"""{solution() = }""")
72
0
"""Swin Transformer model configuration."""

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


# BUG FIX: the logger and the archive map were previously assigned to the
# same name, so the logger was shadowed.
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class to store the configuration of a Swin model.

    Instantiating a configuration with the defaults yields a configuration
    similar to microsoft/swin-tiny-patch4-window7-224.
    """

    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        # BUG FIX: every parameter had been mangled to the same name `_A`
        # (a SyntaxError) and `super().__init__` received an undefined name;
        # distinct parameter names are restored.
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
181
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def snake_case_ ( A_ : Any ): '''simple docstring''' _lowerCamelCase : Any = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(A_, A_ ) def snake_case_ ( A_ : Union[str, Any] ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : Tuple = emb.weight.shape _lowerCamelCase : Dict = nn.Linear(A_, A_, bias=A_ ) _lowerCamelCase : str = emb.weight.data return lin_layer def snake_case_ ( A_ : str, A_ : Optional[int]="facebook/mbart-large-en-ro", A_ : Union[str, Any]=False, A_ : List[str]=False ): '''simple docstring''' _lowerCamelCase : Tuple = torch.load(A_, map_location='''cpu''' )['''model'''] remove_ignore_keys_(A_ ) _lowerCamelCase : int = state_dict['''encoder.embed_tokens.weight'''].shape[0] _lowerCamelCase : Any = MBartConfig.from_pretrained(A_, vocab_size=A_ ) if mbart_aa and finetuned: _lowerCamelCase : Any = '''relu''' _lowerCamelCase : Optional[int] = state_dict['''decoder.embed_tokens.weight'''] _lowerCamelCase : Any = MBartForConditionalGeneration(A_ ) model.model.load_state_dict(A_ ) if finetuned: _lowerCamelCase : str = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model 
is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
72
0
"""Tests for the ``check_dummies`` repo utility that generates dummy objects
for optional backends."""

import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
# BUG FIX: the assignment target had been lost, so the path was computed but
# never written back into the `check_dummies` module.
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        # A line without a backend guard yields None.
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
223
"""simple docstring""" def snake_case_ ( A_ : list[list] ): '''simple docstring''' _lowerCamelCase : Optional[int] = current_set.copy() for row_index, row in enumerate(A_ ): _lowerCamelCase : Tuple = row[0] for column_index, column in enumerate(A_ ): if magnitude == 0: _lowerCamelCase : List[Any] = column continue _lowerCamelCase : List[Any] = column / magnitude # Subtract to cancel term _lowerCamelCase : Union[str, Any] = current_set[0] _lowerCamelCase : Dict = [first_row] _lowerCamelCase : str = current_set[1::] for row in current_set: _lowerCamelCase : Union[str, Any] = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(A_ ) continue for column_index in range(len(A_ ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(A_ ) # Create next recursion iteration set if len(final_set[0] ) != 3: _lowerCamelCase : Any = final_set[0] _lowerCamelCase : Any = [] _lowerCamelCase : Optional[int] = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) _lowerCamelCase : Dict = simplify(A_ ) for i in range(len(A_ ) ): resultant[i].insert(0, current_first_column[i] ) resultant.insert(0, A_ ) _lowerCamelCase : Tuple = resultant return final_set def snake_case_ ( A_ : list[list] ): '''simple docstring''' if len(A_ ) == 0: raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) _lowerCamelCase : Dict = len(A_ ) + 1 if any(len(A_ ) != _length for item in equations ): raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) for row in equations: if any(not isinstance(A_, (int, float) ) for column in row ): raise ValueError('''solve_simultaneous() requires lists of integers''' ) if len(A_ ) == 1: return [equations[0][-1] / equations[0][0]] _lowerCamelCase : Optional[Any] = equations.copy() if any(0 in row for row in data_set ): _lowerCamelCase : str = data_set.copy() _lowerCamelCase : List[Any] = [] for row_index, 
row in enumerate(A_ ): if 0 not in row: _lowerCamelCase : Union[str, Any] = data_set.pop(A_ ) break if not full_row: raise ValueError('''solve_simultaneous() requires at least 1 full equation''' ) data_set.insert(0, A_ ) _lowerCamelCase : List[str] = data_set.copy() _lowerCamelCase : int = simplify(A_ ) _lowerCamelCase : int = simplified[::-1] _lowerCamelCase : list = [] for row in simplified: _lowerCamelCase : Tuple = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue _lowerCamelCase : Optional[Any] = row.copy()[: len(A_ ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(A_ ) == 0: solutions.append(0 ) continue _lowerCamelCase : Tuple = temp_row[1::] _lowerCamelCase : Tuple = temp_row[::-1] for column_index, column in enumerate(A_ ): current_solution -= column * solutions[column_index] solutions.append(A_ ) _lowerCamelCase : Optional[int] = [] for item in solutions: final.append(float(round(A_, 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase__ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
72
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class _UpperCAmelCase : __a : Optional[int] = BlenderbotConfig __a : Optional[int] = {} __a : str = "gelu" def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=False , _A=99 , _A=32 , _A=2 , _A=4 , _A=37 , _A=0.1 , _A=0.1 , _A=20 , _A=2 , _A=1 , _A=0 , ) -> List[Any]: '''simple docstring''' _UpperCAmelCase : Any = parent _UpperCAmelCase : int = batch_size _UpperCAmelCase : Union[str, Any] = seq_length _UpperCAmelCase : Optional[int] = is_training _UpperCAmelCase : Optional[Any] = use_labels _UpperCAmelCase : int = vocab_size _UpperCAmelCase : str = hidden_size _UpperCAmelCase : Any = num_hidden_layers _UpperCAmelCase : Optional[Any] = num_attention_heads _UpperCAmelCase : str = intermediate_size _UpperCAmelCase : Optional[int] = hidden_dropout_prob _UpperCAmelCase : List[str] = attention_probs_dropout_prob _UpperCAmelCase : int = max_position_embeddings _UpperCAmelCase : Any = eos_token_id _UpperCAmelCase : Any = pad_token_id _UpperCAmelCase : List[Any] = bos_token_id def __snake_case ( self ) -> Tuple: '''simple docstring''' _UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _UpperCAmelCase : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _UpperCAmelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 ) _UpperCAmelCase : Dict = ids_tensor([self.batch_size, 
self.seq_length] , self.vocab_size ) _UpperCAmelCase : Union[str, Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _UpperCAmelCase : List[str] = prepare_blenderbot_inputs_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return config, inputs_dict def __snake_case ( self , _A , _A ) -> Any: '''simple docstring''' _UpperCAmelCase : Dict = TFBlenderbotModel(config=__lowerCAmelCase ).get_decoder() _UpperCAmelCase : Union[str, Any] = inputs_dict['''input_ids'''] _UpperCAmelCase : str = input_ids[:1, :] _UpperCAmelCase : Union[str, Any] = inputs_dict['''attention_mask'''][:1, :] _UpperCAmelCase : str = inputs_dict['''head_mask'''] _UpperCAmelCase : str = 1 # first forward pass _UpperCAmelCase : Any = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , head_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase ) _UpperCAmelCase : List[str] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size ) _UpperCAmelCase : Union[str, Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _UpperCAmelCase : int = tf.concat([input_ids, next_tokens] , axis=-1 ) _UpperCAmelCase : Optional[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _UpperCAmelCase : List[Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase 
)[0] _UpperCAmelCase : List[str] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _UpperCAmelCase : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _UpperCAmelCase : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx] _UpperCAmelCase : List[Any] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) def UpperCamelCase ( _lowerCAmelCase : Dict, _lowerCAmelCase : Optional[Any], _lowerCAmelCase : List[Any], _lowerCAmelCase : Optional[int]=None, _lowerCAmelCase : List[str]=None, _lowerCAmelCase : Dict=None, _lowerCAmelCase : Any=None, _lowerCAmelCase : Union[str, Any]=None, ) -> Optional[Any]: if attention_mask is None: _UpperCAmelCase : Any = tf.cast(tf.math.not_equal(A_, config.pad_token_id ), tf.inta ) if decoder_attention_mask is None: _UpperCAmelCase : Optional[int] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ), ], axis=-1, ) if head_mask is None: _UpperCAmelCase : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _UpperCAmelCase : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _UpperCAmelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase ( _lowercase , _lowercase , unittest.TestCase): __a : Optional[int] = 
(TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () __a : Optional[int] = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () __a : Dict = ( { "conversational": TFBlenderbotForConditionalGeneration, "feature-extraction": TFBlenderbotModel, "summarization": TFBlenderbotForConditionalGeneration, "text2text-generation": TFBlenderbotForConditionalGeneration, "translation": TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) __a : Union[str, Any] = True __a : str = False __a : str = False def __snake_case ( self ) -> str: '''simple docstring''' _UpperCAmelCase : str = TFBlenderbotModelTester(self ) _UpperCAmelCase : List[Any] = ConfigTester(self , config_class=__lowerCAmelCase ) def __snake_case ( self ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def __snake_case ( self ) -> Tuple: '''simple docstring''' _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__lowerCAmelCase ) @require_tokenizers @require_tf class _UpperCAmelCase ( unittest.TestCase): __a : str = ["My friends are cool but they eat too many carbs."] __a : int = "facebook/blenderbot-400M-distill" @cached_property def __snake_case ( self ) -> str: '''simple docstring''' return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def __snake_case ( self ) -> str: '''simple docstring''' _UpperCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def __snake_case ( self ) -> int: '''simple docstring''' _UpperCAmelCase : List[str] = self.tokenizer(self.src_text , return_tensors="""tf""" ) _UpperCAmelCase : int = self.model.generate( model_inputs.input_ids , ) _UpperCAmelCase : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__lowerCAmelCase )[0] assert ( generated_words == " That's unfortunate. 
Are they trying to lose weight or are they just trying to be healthier?" )
246
"""simple docstring""" import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class __snake_case ( _lowercase): snake_case__ : List[Any] = "Speech2TextFeatureExtractor" snake_case__ : Union[str, Any] = "Speech2TextTokenizer" def __init__( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" super().__init__(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : List[str] = self.feature_extractor _lowerCamelCase : str = False def __call__( self : List[Any] , *__lowerCAmelCase : int , **__lowerCAmelCase : List[str] ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) _lowerCamelCase : str = kwargs.pop('''raw_speech''' ) else: _lowerCamelCase : Tuple = kwargs.pop('''audio''' , __lowerCAmelCase ) _lowerCamelCase : Optional[Any] = kwargs.pop('''sampling_rate''' , __lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = kwargs.pop('''text''' , __lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: _lowerCamelCase : List[Any] = args[0] _lowerCamelCase : int = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: _lowerCamelCase : List[Any] = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase ) if text is not None: _lowerCamelCase : List[Any] = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase ) if text is None: return inputs elif audio is None: return encodings else: _lowerCamelCase : List[str] = encodings['''input_ids'''] return inputs def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Tuple ): """simple docstring""" return 
self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : int ): """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @contextmanager def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) _lowerCamelCase : Union[str, Any] = True _lowerCamelCase : Any = self.tokenizer yield _lowerCamelCase : List[str] = self.feature_extractor _lowerCamelCase : Tuple = False
72
0
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import AlignProcessor, EfficientNetImageProcessor


@require_vision
class _SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Tests AlignProcessor: save/load round-trips and parity with its
    underlying BertTokenizer(Fast) and EfficientNetImageProcessor."""

    def setUp(self):
        # Throwaway directory holding a tiny WordPiece vocab and an image
        # processor config, so components can be loaded from disk.
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        """Load the slow tokenizer from the temp directory."""
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Load the fast (Rust) tokenizer from the temp directory."""
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        """Load the image processor from the temp directory."""
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels moved last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        # Round-trip through save_pretrained/from_pretrained for both the
        # slow- and fast-tokenizer flavours of the processor.
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        # The processor must produce the same pixel values as the bare
        # image processor.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        # The processor must produce the same encodings as the bare tokenizer.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # Calling the processor with no inputs at all must raise.
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
10
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''ReformerTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''ReformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ReformerAttention''', '''ReformerForMaskedLM''', '''ReformerForQuestionAnswering''', '''ReformerForSequenceClassification''', '''ReformerLayer''', '''ReformerModel''', '''ReformerModelWithLMHead''', '''ReformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = 
_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
0
def nor_gate(input_1: int, input_2: int) -> int:
    """Return the output of a NOR gate: 1 iff both inputs are 0.

    Args:
        input_1: First binary input (0 or 1).
        input_2: Second binary input (0 or 1).

    Returns:
        1 when both inputs are 0, otherwise 0.

    >>> nor_gate(0, 0)
    1
    >>> nor_gate(1, 0)
    0
    """
    # Chained comparison: True only when input_1 == input_2 and input_2 == 0.
    return int(input_1 == input_2 == 0)


def main() -> None:
    """Print the truth table of a NOR gate."""
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
90
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = { '''configuration_autoformer''': [ '''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AutoformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''AutoformerForPrediction''', '''AutoformerModel''', '''AutoformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
0
"""Lazy import structure for the CLIP model family (PT / TF / Flax)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

# Mapping of submodule name -> public names, consumed by _LazyModule below.
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
193
"""simple docstring""" import math def snake_case_ ( A_ : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5, int(math.sqrt(A_ ) + 1 ), 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def snake_case_ ( A_ : float = 0.1 ): '''simple docstring''' _lowerCamelCase : Optional[int] = 3 _lowerCamelCase : List[str] = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1 ): primes += is_prime(A_ ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
72
0
import argparse import os from accelerate.test_utils import execute_subprocess_async def _a ( SCREAMING_SNAKE_CASE : Optional[int]=None ) -> Union[str, Any]: """simple docstring""" if subparsers is not None: __lowerCAmelCase: Tuple = subparsers.add_parser('test' ) else: __lowerCAmelCase: List[str] = argparse.ArgumentParser('Accelerate test command' ) parser.add_argument( '--config_file' , default=A_ , help=( 'The path to use to store the config file. Will default to a file named default_config.yaml in the cache ' 'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ' 'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ' 'with \'huggingface\'.' ) , ) if subparsers is not None: parser.set_defaults(func=A_ ) return parser def _a ( SCREAMING_SNAKE_CASE : List[Any] ) -> Dict: """simple docstring""" __lowerCAmelCase: int = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] ) if args.config_file is None: __lowerCAmelCase: Tuple = script_name else: __lowerCAmelCase: List[Any] = f'''--config_file={args.config_file} {script_name}''' __lowerCAmelCase: Tuple = ['''accelerate-launch'''] + test_args.split() __lowerCAmelCase: List[Any] = execute_subprocess_async(A_ , env=os.environ.copy() ) if result.returncode == 0: print('Test is a success! You are ready for your distributed training!' ) def _a ( ) -> Dict: """simple docstring""" __lowerCAmelCase: Tuple = test_command_parser() __lowerCAmelCase: List[Any] = parser.parse_args() test_command(A_ ) if __name__ == "__main__": main()
322
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _lowerCamelCase : Union[str, Any] = TextStreamer(__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCamelCase : int = cs.out[:-1] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase , 
max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : List[str] = tokenizer.decode(greedy_ids[0] ) _lowerCamelCase : Tuple = TextIteratorStreamer(__lowerCAmelCase ) _lowerCamelCase : Tuple = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase ) thread.start() _lowerCamelCase : int = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : int = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = greedy_ids[:, input_ids.shape[1] :] _lowerCamelCase : int = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _lowerCamelCase : Any = TextStreamer(__lowerCAmelCase , skip_prompt=__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCamelCase : Union[str, Any] = cs.out[:-1] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''distilgpt2''' ) _lowerCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : str = -1 _lowerCamelCase : Any = torch.ones((1, 5) , device=__lowerCAmelCase 
).long() * model.config.bos_token_id with CaptureStdout() as cs: _lowerCamelCase : List[Any] = TextStreamer(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _lowerCamelCase : Any = cs.out[:-1] # Remove the final "\n" _lowerCamelCase : int = tokenizer(__lowerCAmelCase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = -1 _lowerCamelCase : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : List[str] = TextIteratorStreamer(__lowerCAmelCase , timeout=0.0_01 ) _lowerCamelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__lowerCAmelCase ): _lowerCamelCase : Optional[Any] = '''''' for new_text in streamer: streamer_text += new_text
72
0
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE ( _lowercase ): """simple docstring""" def __init__( self : Optional[int] ,lowercase_ : Union[List[ControlNetModel], Tuple[ControlNetModel]] ): super().__init__() lowerCAmelCase__ : Dict = nn.ModuleList(__lowerCAmelCase ) def __lowerCAmelCase ( self : List[Any] ,lowercase_ : torch.FloatTensor ,lowercase_ : Union[torch.Tensor, float, int] ,lowercase_ : torch.Tensor ,lowercase_ : List[torch.tensor] ,lowercase_ : List[float] ,lowercase_ : Optional[torch.Tensor] = None ,lowercase_ : Optional[torch.Tensor] = None ,lowercase_ : Optional[torch.Tensor] = None ,lowercase_ : Optional[Dict[str, Any]] = None ,lowercase_ : bool = False ,lowercase_ : bool = True ,): for i, (image, scale, controlnet) in enumerate(zip(__lowerCAmelCase ,__lowerCAmelCase ,self.nets ) ): lowerCAmelCase__ : Tuple = controlnet( __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,) # merge samples if i == 0: lowerCAmelCase__ : List[str] = down_samples, mid_sample else: lowerCAmelCase__ : List[Any] = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__lowerCAmelCase ,__lowerCAmelCase ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def __lowerCAmelCase ( self : List[str] ,lowercase_ : Union[str, os.PathLike] ,lowercase_ : bool = True ,lowercase_ : Callable = None ,lowercase_ : bool = False ,lowercase_ : Optional[str] = None ,): lowerCAmelCase__ : Any = 0 lowerCAmelCase__ : List[Any] = save_directory for controlnet in self.nets: 
controlnet.save_pretrained( __lowerCAmelCase ,is_main_process=__lowerCAmelCase ,save_function=__lowerCAmelCase ,safe_serialization=__lowerCAmelCase ,variant=__lowerCAmelCase ,) idx += 1 lowerCAmelCase__ : Optional[int] = model_path_to_save + F'_{idx}' @classmethod def __lowerCAmelCase ( cls : Tuple ,lowercase_ : Optional[Union[str, os.PathLike]] ,**lowercase_ : Optional[int] ): lowerCAmelCase__ : int = 0 lowerCAmelCase__ : int = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... lowerCAmelCase__ : int = pretrained_model_path while os.path.isdir(__lowerCAmelCase ): lowerCAmelCase__ : Union[str, Any] = ControlNetModel.from_pretrained(__lowerCAmelCase ,**__lowerCAmelCase ) controlnets.append(__lowerCAmelCase ) idx += 1 lowerCAmelCase__ : Union[str, Any] = pretrained_model_path + F'_{idx}' logger.info(F'{len(__lowerCAmelCase )} controlnets loaded from {pretrained_model_path}.' ) if len(__lowerCAmelCase ) == 0: raise ValueError( F'No ControlNets found under {os.path.dirname(__lowerCAmelCase )}. Expected at least {pretrained_model_path + "_0"}.' ) return cls(__lowerCAmelCase )
106
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase__ = logging.get_logger(__name__) # TODO: upload to AWS lowerCAmelCase__ = { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json''' ), } class __snake_case ( _lowercase): snake_case__ : int = "retribert" def __init__( self : Optional[int] , __lowerCAmelCase : str=3_0_5_2_2 , __lowerCAmelCase : Tuple=7_6_8 , __lowerCAmelCase : Union[str, Any]=8 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=3_0_7_2 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=5_1_2 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Optional[Any]=1E-12 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any=1_2_8 , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : str , ): """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) _lowerCamelCase : Dict = vocab_size _lowerCamelCase : Union[str, Any] = hidden_size _lowerCamelCase : Dict = num_hidden_layers _lowerCamelCase : int = num_attention_heads _lowerCamelCase : int = hidden_act _lowerCamelCase : str = intermediate_size _lowerCamelCase : Union[str, Any] = hidden_dropout_prob _lowerCamelCase : List[Any] = attention_probs_dropout_prob _lowerCamelCase : Optional[int] = max_position_embeddings _lowerCamelCase : List[Any] = type_vocab_size _lowerCamelCase : Any = initializer_range _lowerCamelCase : Optional[int] = layer_norm_eps _lowerCamelCase : int = share_encoders _lowerCamelCase : Optional[Any] = projection_dim
72
0
"""Tests for the PerceiverTokenizer, a byte-level (UTF-8) tokenizer with no fixed vocab file."""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


# Framework used for `return_tensors=` in the batch tests below.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False  # Perceiver has no fast tokenizer

    def setUp(self):
        super().setUp()
        # The tokenizer can be instantiated without pretrained files; save a
        # fresh one so the common mixin tests can load it from tmpdirname.
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        """Tokenizer loaded from the reference pretrained checkpoint."""
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        """Build a (text, ids) pair in which every id decodes to plain ASCII.

        The common tests assume every single id is decodable on its own, which
        does not hold for a byte vocabulary (e.g. byte 128 alone is not valid
        UTF-8), so we override this helper to keep only safely decodable ids.
        """
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                # Skip undecodable byte ids instead of appending a stale `tok`.
                continue
            toks.append((i, tok))

        # Keep tokens that are pure ASCII letters/spaces and round-trip encode.
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        toks_ids = [t[0] for t in toks]

        # Ensure consistency: decoded text must contain a space when multiple ids.
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_multibytes_char(self):
        """Multi-byte UTF-8 characters must encode/decode losslessly."""
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(tmp_dir, additional_special_tokens=new_added_tokens)

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_invalid_byte_id(self):
        # A lone id 178 is not valid UTF-8; decode must yield the replacement char.
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
37
"""Tests for `is_safetensors_compatible`.

A checkpoint layout is compatible when every PyTorch `.bin` weight file has a
safetensors counterpart (optionally with a variant suffix such as `.fp16`).
"""
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        # The unet .bin file has no safetensors counterpart -> incompatible.
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        # The text_encoder .bin file has no safetensors counterpart -> incompatible.
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
72
0
class TrieNode:
    """Node of a prefix tree (trie) keyed by single characters."""

    def __init__(self) -> None:
        self.nodes: dict[str, "TrieNode"] = {}  # mapping from char to child TrieNode
        self.is_leaf = False  # True iff a stored word ends at this node

    def insert_many(self, words: list[str]) -> None:
        """Insert every word of `words` into the trie rooted at self."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert `word` into the trie rooted at self, creating nodes as needed."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True iff `word` was inserted as a whole word (not just a prefix)."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Remove `word` from the trie, pruning nodes that become unused."""

        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            # Returns True when `curr` can be removed from its parent.
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    """Print every word stored beneath `node`, prefixed by `word`."""
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    """Exercise insert/find/delete and return True on success (asserts on failure)."""
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    """
    >>> pytests()
    """
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
14
"""Tests for the PyTorch MaskFormer model (backbone + pixel decoder + transformer decoder)."""
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel

if is_vision_available():
    from transformers import MaskFormerImageProcessor

if is_vision_available():
    from PIL import Image


class MaskFormerModelTester:
    """Builds tiny configs/inputs and shape-checks MaskFormer forward passes."""

    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        # Random binary masks and labels are enough for shape/loss checks.
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

        comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
72
0
'''simple docstring''' import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class lowerCAmelCase : def __init__( self : Optional[Any] , __lowercase : Any , __lowercase : Any=3 , __lowercase : List[Any]=7 , __lowercase : Optional[Any]=True , __lowercase : Optional[int]=True , __lowercase : Optional[int]=False , __lowercase : Optional[Any]=True , __lowercase : Optional[Any]=99 , __lowercase : List[Any]=32 , __lowercase : Any=5 , __lowercase : Any=4 , __lowercase : Union[str, Any]=37 , __lowercase : Optional[Any]="gelu" , __lowercase : str=0.1 , __lowercase : Optional[Any]=0.1 , __lowercase : Optional[int]=512 , __lowercase : List[Any]=16 , __lowercase : List[Any]=2 , __lowercase : List[Any]=0.0_2 , __lowercase : List[str]=3 , __lowercase : Any=4 , __lowercase : int=None , ): """simple docstring""" __lowercase =parent __lowercase =batch_size __lowercase =seq_length __lowercase =is_training __lowercase =use_input_mask __lowercase =use_token_type_ids __lowercase =use_labels __lowercase =vocab_size __lowercase =hidden_size __lowercase =num_hidden_layers __lowercase =num_attention_heads __lowercase =intermediate_size __lowercase =hidden_act __lowercase =hidden_dropout_prob __lowercase =attention_probs_dropout_prob __lowercase =max_position_embeddings __lowercase =type_vocab_size __lowercase =type_sequence_label_size __lowercase =initializer_range __lowercase =num_labels __lowercase =num_choices __lowercase =scope def snake_case 
( self : Optional[Any] ): """simple docstring""" __lowercase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase =None if self.use_input_mask: __lowercase =random_attention_mask([self.batch_size, self.seq_length] ) __lowercase =None __lowercase =None __lowercase =None __lowercase =None if self.use_labels: __lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase =ids_tensor([self.batch_size] , self.num_choices ) __lowercase =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case ( self : Union[str, Any] ): """simple docstring""" return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__lowerCAmelCase , ) def snake_case ( self : Optional[int] , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : Any , __lowercase : Tuple , __lowercase : List[str] , __lowercase : str , __lowercase : List[str] ): """simple docstring""" __lowercase =FalconModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __lowercase =model(__lowerCAmelCase , attention_mask=__lowerCAmelCase ) __lowercase =model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self : str , __lowercase : Union[str, Any] , __lowercase : str , __lowercase : Tuple , __lowercase : 
Optional[int] , __lowercase : List[Any] , __lowercase : Any , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : Any , ): """simple docstring""" __lowercase =True __lowercase =FalconModel(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __lowercase =model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , ) __lowercase =model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , ) __lowercase =model(__lowerCAmelCase , attention_mask=__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self : Any , __lowercase : Dict , __lowercase : str , __lowercase : str , __lowercase : Any , __lowercase : List[Any] , __lowercase : str , __lowercase : Dict , __lowercase : List[Any] , __lowercase : str , ): """simple docstring""" __lowercase =FalconForCausalLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __lowercase =model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self : int , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : int , __lowercase : Any , __lowercase : List[Any] , __lowercase : List[str] , __lowercase : Dict , __lowercase : str , __lowercase : List[str] , ): """simple docstring""" __lowercase =True __lowercase =True __lowercase =FalconForCausalLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() # first forward pass __lowercase =model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase , ) __lowercase =outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __lowercase 
=ids_tensor((self.batch_size, 3) , config.vocab_size ) __lowercase =ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __lowercase =torch.cat([input_ids, next_tokens] , dim=-1 ) __lowercase =torch.cat([input_mask, next_mask] , dim=-1 ) __lowercase =model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , )['''hidden_states'''][0] __lowercase =model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , )['''hidden_states'''][0] # select random slice __lowercase =ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowercase =output_from_no_past[:, -3:, random_slice_idx].detach() __lowercase =output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) ) def snake_case ( self : int ): """simple docstring""" __lowercase =self.prepare_config_and_inputs() ( __lowercase ) =config_and_inputs __lowercase ={'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): lowerCAmelCase_ = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) lowerCAmelCase_ = (FalconForCausalLM,) if is_torch_available() else () lowerCAmelCase_ = ( { "feature-extraction": FalconModel, "text-classification": FalconForSequenceClassification, "text-generation": FalconForCausalLM, "question-answering": FalconForQuestionAnswering, "token-classification": 
FalconForTokenClassification, "zero-shot": FalconForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False def snake_case ( self : Dict ): """simple docstring""" __lowercase =FalconModelTester(self ) __lowercase =ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def snake_case ( self : Union[str, Any] ): """simple docstring""" self.config_tester.run_common_tests() def snake_case ( self : str ): """simple docstring""" __lowercase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def snake_case ( self : Optional[int] ): """simple docstring""" __lowercase =self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: __lowercase =alibi self.model_tester.create_and_check_model(__lowerCAmelCase , *__lowerCAmelCase ) def snake_case ( self : int ): """simple docstring""" __lowercase =self.model_tester.prepare_config_and_inputs_for_common() __lowercase =3 __lowercase =input_dict['''input_ids'''] __lowercase =input_ids.ne(1 ).to(__lowerCAmelCase ) __lowercase =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __lowercase =FalconForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __lowercase =model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def snake_case ( self : int ): """simple docstring""" __lowercase =self.model_tester.prepare_config_and_inputs_for_common() __lowercase =3 __lowercase ='''single_label_classification''' __lowercase =input_dict['''input_ids'''] __lowercase =input_ids.ne(1 ).to(__lowerCAmelCase ) __lowercase =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __lowercase =FalconForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __lowercase 
=model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def snake_case ( self : Union[str, Any] ): """simple docstring""" __lowercase =self.model_tester.prepare_config_and_inputs_for_common() __lowercase =input_dict['''input_ids'''] __lowercase =FalconForCausalLM(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __lowercase =model(__lowerCAmelCase , use_cache=__lowerCAmelCase ) __lowercase =input_ids.shape[0] __lowercase =model._convert_to_rw_cache(result.past_key_values ) __lowercase =model._convert_cache_to_standard_format(__lowerCAmelCase , __lowerCAmelCase ) for layer in range(len(__lowerCAmelCase ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def snake_case ( self : Dict ): """simple docstring""" __lowercase =self.model_tester.prepare_config_and_inputs_for_common() __lowercase =3 __lowercase ='''multi_label_classification''' __lowercase =input_dict['''input_ids'''] __lowercase =input_ids.ne(1 ).to(__lowerCAmelCase ) __lowercase =ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __lowercase =FalconForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __lowercase =model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def snake_case ( self : str ): """simple docstring""" for model_class in self.all_generative_model_classes: __lowercase =self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(__lowerCAmelCase 
, 'use_cache' ): return __lowercase =model_class(__lowerCAmelCase ).to(__lowerCAmelCase ) if "use_cache" not in inputs: __lowercase =True __lowercase =model(**__lowerCAmelCase ) # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) if "past_key_values" not in outputs: return __lowercase =( getattr(__lowerCAmelCase , 'decoder_layers' , __lowerCAmelCase ) or getattr(__lowerCAmelCase , 'num_decoder_layers' , __lowerCAmelCase ) or config.num_hidden_layers ) __lowercase =getattr(__lowerCAmelCase , 'num_kv_heads' , config.num_attention_heads ) __lowercase =getattr(__lowerCAmelCase , 'd_model' , config.hidden_size ) __lowercase =embed_dim // num_attention_heads __lowercase =outputs['''past_key_values'''] self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase ) __lowercase =inputs['''input_ids'''].shape for i in range(__lowerCAmelCase ): if config.new_decoder_architecture: __lowercase =config.num_attention_heads elif config.multi_query: __lowercase =1 self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class lowerCAmelCase ( unittest.TestCase ): @slow def snake_case ( self : Union[str, Any] ): """simple docstring""" __lowercase =AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' ) __lowercase =FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' ) model.eval() model.to(__lowerCAmelCase ) __lowercase =tokenizer('My favorite food is' , return_tensors='pt' ).to(__lowerCAmelCase ) __lowercase =( '''My favorite food is pizza. 
I love it so much that I have a pizza party every year for my birthday.''' ) __lowercase =model.generate(**__lowerCAmelCase , do_sample=__lowerCAmelCase , max_new_tokens=19 ) __lowercase =tokenizer.batch_decode(__lowerCAmelCase )[0] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) @slow def snake_case ( self : str ): """simple docstring""" for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: __lowercase =AutoTokenizer.from_pretrained(__lowerCAmelCase ) __lowercase =FalconForCausalLM.from_pretrained(__lowerCAmelCase ) model.eval() model.to(__lowerCAmelCase ) __lowercase =tokenizer('My favorite food is' , return_tensors='pt' ).to(__lowerCAmelCase ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**__lowerCAmelCase , do_sample=__lowerCAmelCase , max_new_tokens=4 ) model.generate(**__lowerCAmelCase , do_sample=__lowerCAmelCase , max_new_tokens=4 ) model.generate(**__lowerCAmelCase , num_beams=2 , max_new_tokens=4 ) @slow def snake_case ( self : Dict ): """simple docstring""" with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: __lowercase =AutoTokenizer.from_pretrained(__lowerCAmelCase ) __lowercase =FalconForCausalLM.from_pretrained(__lowerCAmelCase ) model.eval() model.to(device=__lowerCAmelCase ) __lowercase =tokenizer('My favorite food is' , return_tensors='pt' ).to(__lowerCAmelCase ) # Test results are the same with and without cache __lowercase =model.generate(**__lowerCAmelCase , do_sample=__lowerCAmelCase , max_new_tokens=20 , use_cache=__lowerCAmelCase ) __lowercase =model.generate(**__lowerCAmelCase , do_sample=__lowerCAmelCase , max_new_tokens=20 , use_cache=__lowerCAmelCase ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
141
"""Breadth-first-search shortest path and shortest distance on an unweighted graph."""

# Small undirected demo graph used by the __main__ examples below.
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Return a shortest path from `start` to `goal` as a list of nodes.

    Args:
        graph: adjacency mapping node -> list of neighbour nodes.
        start: node to start the search from.
        goal: node to reach.

    Returns:
        The list of nodes on a shortest path (including both endpoints),
        or [] when no path exists.
    """
    explored = set()
    # Queue of partial paths still to be extended (BFS frontier).
    queue = [[start]]

    if start == goal:
        return [start]

    while queue:
        path = queue.pop(0)
        node = path[-1]
        if node not in explored:
            # Extend the current path by every unexplored neighbour.
            for neighbour in graph[node]:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # First time we touch the goal we have a shortest path (BFS).
                if neighbour == goal:
                    return new_path
            explored.add(node)

    # No path between the two nodes.
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on a shortest path from `start` to `target`.

    Returns:
        The shortest distance, 0 when start == target, or -1 when either
        node is missing from the graph or no path exists.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0

    queue = [start]
    visited = {start}
    # Distance from `start`; target starts at -1 ("not reached yet").
    dist = {start: 0, target: -1}

    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1

    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
72
0
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowerCamelCase_ ( _lowercase , unittest.TestCase ): lowerCAmelCase__ = KandinskyImgaImgPipeline lowerCAmelCase__ = ["prompt", "image_embeds", "negative_image_embeds", "image"] lowerCAmelCase__ = [ "prompt", "negative_prompt", "image_embeds", "negative_image_embeds", "image", ] lowerCAmelCase__ = [ "generator", "height", "width", "strength", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] lowerCAmelCase__ = False @property def lowercase_ ( self : List[Any] ): '''simple docstring''' return 32 @property def lowercase_ ( self : Optional[int] ): '''simple docstring''' return 32 @property def lowercase_ ( self : List[Any] ): '''simple docstring''' return self.time_input_dim @property def lowercase_ ( self : Optional[Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def lowercase_ ( self : List[str] ): '''simple docstring''' return 100 @property def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : List[Any] = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' ) return tokenizer @property def lowercase_ ( self : str ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : Optional[Any] = MCLIPConfig( numDims=self.cross_attention_dim , 
transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , ) UpperCAmelCase__ : Tuple = MultilingualCLIP(__lowerCAmelCase ) UpperCAmelCase__ : Optional[int] = text_encoder.eval() return text_encoder @property def lowercase_ ( self : str ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : List[str] = { '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''text_image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''text_image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } UpperCAmelCase__ : List[Any] = UNetaDConditionModel(**__lowerCAmelCase ) return model @property def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowercase_ ( self : List[str] ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs ) return model def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : 
Dict = self.dummy_text_encoder UpperCAmelCase__ : Tuple = self.dummy_tokenizer UpperCAmelCase__ : Dict = self.dummy_unet UpperCAmelCase__ : int = self.dummy_movq UpperCAmelCase__ : Any = { '''num_train_timesteps''': 1_000, '''beta_schedule''': '''linear''', '''beta_start''': 0.0_0_0_8_5, '''beta_end''': 0.0_1_2, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } UpperCAmelCase__ : Optional[Any] = DDIMScheduler(**__lowerCAmelCase ) UpperCAmelCase__ : List[str] = { '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def lowercase_ ( self : str , _A : Any , _A : int=0 ): '''simple docstring''' UpperCAmelCase__ : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) UpperCAmelCase__ : Optional[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__lowerCAmelCase ) # create init_image UpperCAmelCase__ : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) UpperCAmelCase__ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase__ : Dict = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert('''RGB''' ).resize((256, 256) ) if str(__lowerCAmelCase ).startswith('''mps''' ): UpperCAmelCase__ : Dict = torch.manual_seed(__lowerCAmelCase ) else: UpperCAmelCase__ : int = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) UpperCAmelCase__ : Tuple = { '''prompt''': '''horse''', '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def lowercase_ ( self : str ): '''simple 
docstring''' UpperCAmelCase__ : Optional[int] = '''cpu''' UpperCAmelCase__ : List[Any] = self.get_dummy_components() UpperCAmelCase__ : Optional[int] = self.pipeline_class(**__lowerCAmelCase ) UpperCAmelCase__ : Optional[Any] = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) UpperCAmelCase__ : Dict = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) ) UpperCAmelCase__ : Optional[int] = output.images UpperCAmelCase__ : int = pipe( **self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0] UpperCAmelCase__ : List[Any] = image[0, -3:, -3:, -1] UpperCAmelCase__ : List[str] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase__ : List[Any] = np.array( [0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : List[str] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Any = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/kandinsky_img2img_frog.npy''' ) UpperCAmelCase__ : Dict = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) UpperCAmelCase__ : List[Any] = '''A red cartoon frog, 4k''' UpperCAmelCase__ : int = KandinskyPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa ) 
pipe_prior.to(__lowerCAmelCase ) UpperCAmelCase__ : List[Any] = KandinskyImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa ) UpperCAmelCase__ : Optional[Any] = pipeline.to(__lowerCAmelCase ) pipeline.set_progress_bar_config(disable=__lowerCAmelCase ) UpperCAmelCase__ : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 ) UpperCAmelCase__ : List[str] = pipe_prior( __lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() UpperCAmelCase__ : List[Any] = pipeline( __lowerCAmelCase , image=__lowerCAmelCase , image_embeds=__lowerCAmelCase , negative_image_embeds=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , ) UpperCAmelCase__ : List[str] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
181
"""Convert an LXMERT TensorFlow checkpoint into a PyTorch model file."""

import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Build an LXMERT model from `config_file`, load TF weights, save as PyTorch.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint to convert.
        config_file: JSON config describing the model architecture.
        pytorch_dump_path: where to write the converted PyTorch state dict.
    """
    # Initialise PyTorch model from the JSON architecture description.
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from the TF checkpoint into the freshly built model.
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
72
0
"""Tests for the TF `GradientAccumulator` helper, including under MirroredStrategy."""

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.eager import context
    from tensorflow.python.framework import ops

    from transformers import GradientAccumulator, create_optimizer


@require_tf
class a_(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        """Assert two equal-length float lists are element-wise almost equal within `tol`."""
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        """Accumulation sums gradients, tracks steps, rejects mismatched shapes, resets."""
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        # A call with a different number of gradients must be rejected.
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        """Accumulation and apply work per-replica under a two-device MirroredStrategy."""
        # Reset the eager context so the strategy below can configure devices.
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            # Split the single CPU into two logical devices for MirroredStrategy.
            tf.config.set_logical_device_configuration(
                physical_devices[0],
                [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()],
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            # Per-replica staging variable used to feed gradients into the replicas.
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
223
"""simple docstring""" from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def snake_case_ ( ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : int = 9, 14 # noqa: F841 _lowerCamelCase : List[Any] = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] _lowerCamelCase : Any = defaultdict(A_ ) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost] ) adjancency[nodea].append([nodea, cost] ) _lowerCamelCase : List[str] = mst(A_ ) _lowerCamelCase : Optional[int] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: _lowerCamelCase : int = tuple(answer[:2] ) _lowerCamelCase : int = tuple(edge[::-1] ) assert edge in result or reverse in result
72
0
"""simple docstring""" lowerCamelCase__ : Any = { '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } def UpperCamelCase ( _lowerCAmelCase : dict, _lowerCAmelCase : int, _lowerCAmelCase : int ) -> Optional[int]: _UpperCAmelCase : List[str] = set() # keep track of all the paths to be checked _UpperCAmelCase : str = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue _UpperCAmelCase : str = queue.pop(0 ) # get the last node from the path _UpperCAmelCase : List[Any] = path[-1] if node not in explored: _UpperCAmelCase : Union[str, Any] = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: _UpperCAmelCase : Union[str, Any] = list(A_ ) new_path.append(A_ ) queue.append(A_ ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(A_ ) # in case there's no path between the 2 nodes return [] def UpperCamelCase ( _lowerCAmelCase : dict, _lowerCAmelCase : int, _lowerCAmelCase : Dict ) -> List[Any]: if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 _UpperCAmelCase : Optional[int] = [start] _UpperCAmelCase : int = set(A_ ) # Keep tab on distances from `start` node. 
_UpperCAmelCase : int = {start: 0, target: -1} while queue: _UpperCAmelCase : Optional[Any] = queue.pop(0 ) if node == target: _UpperCAmelCase : Any = ( dist[node] if dist[target] == -1 else min(dist[target], dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(A_ ) queue.append(A_ ) _UpperCAmelCase : Any = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
246
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all BART models at https://huggingface.co/models?filter=bart lowerCAmelCase__ = { '''vocab_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''', }, '''merges_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''facebook/bart-base''': 
'''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''', }, } lowerCAmelCase__ = { '''facebook/bart-base''': 1024, '''facebook/bart-large''': 1024, '''facebook/bart-large-mnli''': 1024, '''facebook/bart-large-cnn''': 1024, '''facebook/bart-large-xsum''': 1024, '''yjernite/bart_eli5''': 1024, } class __snake_case ( _lowercase): snake_case__ : Any = VOCAB_FILES_NAMES snake_case__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ : Optional[int] = ["input_ids", "attention_mask"] snake_case__ : Any = BartTokenizer def __init__( self : int , __lowerCAmelCase : Dict=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[Any]="replace" , __lowerCAmelCase : Any="<s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : str="</s>" , __lowerCAmelCase : Dict="<s>" , __lowerCAmelCase : Union[str, Any]="<unk>" , __lowerCAmelCase : Any="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=True , **__lowerCAmelCase : Optional[Any] , ): """simple docstring""" super().__init__( __lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , errors=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , 
pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase , **__lowerCAmelCase , ) _lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space: _lowerCamelCase : Dict = getattr(__lowerCAmelCase , pre_tok_state.pop('''type''' ) ) _lowerCamelCase : Any = add_prefix_space _lowerCamelCase : int = pre_tok_class(**__lowerCAmelCase ) _lowerCamelCase : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _lowerCamelCase : List[str] = '''post_processor''' _lowerCamelCase : List[str] = getattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase ) if tokenizer_component_instance: _lowerCamelCase : int = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowerCamelCase : Tuple = tuple(state['''sep'''] ) if "cls" in state: _lowerCamelCase : int = tuple(state['''cls'''] ) _lowerCamelCase : Union[str, Any] = False if state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space: _lowerCamelCase : Dict = add_prefix_space _lowerCamelCase : Optional[Any] = True if state.get('''trim_offsets''' , __lowerCAmelCase ) != trim_offsets: _lowerCamelCase : Any = trim_offsets _lowerCamelCase : str = True if changes_to_apply: _lowerCamelCase : List[str] = getattr(__lowerCAmelCase , state.pop('''type''' ) ) _lowerCamelCase : str = component_class(**__lowerCAmelCase ) setattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase ) @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE ( self : Any , 
__lowerCAmelCase : int ): """simple docstring""" _lowerCamelCase : Tuple = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else value _lowerCamelCase : str = value def SCREAMING_SNAKE_CASE ( self : int , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ): """simple docstring""" _lowerCamelCase : Dict = kwargs.get('''is_split_into_words''' , __lowerCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' '''to use it with pretokenized inputs.''' ) return super()._batch_encode_plus(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : List[Any] ): """simple docstring""" _lowerCamelCase : Any = kwargs.get('''is_split_into_words''' , __lowerCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' '''to use it with pretokenized inputs.''' ) return super()._encode_plus(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ): """simple docstring""" _lowerCamelCase : Tuple = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase ) return tuple(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=None ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ): """simple docstring""" _lowerCamelCase : List[str] = 
[self.sep_token_id] _lowerCamelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
72
0
"""UperNet model configuration."""

import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class _SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration for a UperNet semantic-segmentation model.

    Stores the backbone configuration plus the decode/auxiliary head
    hyper-parameters; defaults mirror the reference implementation.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],  # noqa: B006 — kept for config-serialization compatibility
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # Rebuild the proper config class from a plain dict.
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
10
"""simple docstring""" from __future__ import annotations def snake_case_ ( A_ : str ): '''simple docstring''' return [ord(A_ ) - 96 for elem in plain] def snake_case_ ( A_ : list[int] ): '''simple docstring''' return "".join(chr(elem + 96 ) for elem in encoded ) def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Dict = encode(input('''-> ''' ).strip().lower() ) print('''Encoded: ''', A_ ) print('''Decoded:''', decode(A_ ) ) if __name__ == "__main__": main()
72
0
"""Tests for the `text-generation` pipeline (PyTorch and TensorFlow backends)."""

import unittest

from transformers import (
    MODEL_FOR_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_CAUSAL_LM_MAPPING,
    TextGenerationPipeline,
    logging,
    pipeline,
)
from transformers.testing_utils import (
    CaptureLogger,
    is_pipeline_test,
    require_accelerate,
    require_tf,
    require_torch,
    require_torch_gpu,
    require_torch_or_tf,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
class __lowerCAmelCase(unittest.TestCase):
    """End-to-end tests for `TextGenerationPipeline` on tiny models."""

    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )

    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]

    def test_stop_sequence_stopping_criteria(self):
        prompt = """Hello I believe in"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])

    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slighly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10_000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)

    def test_pipeline_length_setting_warning(self):
        prompt = """Hello world"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beggining of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
90
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = { '''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''], '''tokenization_lxmert''': ['''LxmertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''LxmertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''LxmertEncoder''', '''LxmertForPreTraining''', '''LxmertForQuestionAnswering''', '''LxmertModel''', '''LxmertPreTrainedModel''', '''LxmertVisualFeatureEncoder''', '''LxmertXLayer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLxmertForPreTraining''', '''TFLxmertMainLayer''', '''TFLxmertModel''', '''TFLxmertPreTrainedModel''', '''TFLxmertVisualFeatureEncoder''', ] if TYPE_CHECKING: from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_lxmert_fast import LxmertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_lxmert import ( 
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
0
"""``transformers-cli train`` subcommand: train a text-classification pipeline from a CSV."""
import os
from argparse import ArgumentParser, Namespace

from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand


if not is_tf_available() and not is_torch_available():
    raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")

# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory used by argparse's ``set_defaults(func=...)`` to build a `TrainCommand`."""
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the ``train`` subcommand and its arguments on *parser*."""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        """Load the pipeline and the train/validation datasets described by *args*."""
        self.logger = logging.get_logger("transformers-cli/training")

        # Prefer TF when both backends are installed (matches original behavior).
        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )

        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        """Dispatch to the backend-specific training loop."""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        # PyTorch training is not implemented for this CLI command.
        raise NotImplementedError

    def run_tf(self):
        """Train the TF pipeline and save it to ``self.output``."""
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
193
"""simple docstring""" def snake_case_ ( A_ : list[list[float]] ): '''simple docstring''' _lowerCamelCase : list[list[float]] = [] for data in source_data: for i, el in enumerate(A_ ): if len(A_ ) < i + 1: data_lists.append([] ) data_lists[i].append(float(A_ ) ) return data_lists def snake_case_ ( A_ : list[list[float]], A_ : list[int] ): '''simple docstring''' _lowerCamelCase : list[list[float]] = [] for dlist, weight in zip(A_, A_ ): _lowerCamelCase : Any = min(A_ ) _lowerCamelCase : Optional[Any] = max(A_ ) _lowerCamelCase : list[float] = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: _lowerCamelCase : str = F'''Invalid weight of {weight:f} provided''' raise ValueError(A_ ) score_lists.append(A_ ) return score_lists def snake_case_ ( A_ : list[list[float]] ): '''simple docstring''' _lowerCamelCase : list[float] = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(A_ ): _lowerCamelCase : List[str] = final_scores[j] + ele return final_scores def snake_case_ ( A_ : list[list[float]], A_ : list[int] ): '''simple docstring''' _lowerCamelCase : Tuple = get_data(A_ ) _lowerCamelCase : Optional[Any] = calculate_each_score(A_, A_ ) _lowerCamelCase : str = generate_final_scores(A_ ) # append scores to source data for i, ele in enumerate(A_ ): source_data[i].append(A_ ) return source_data
72
0
"""Tests for the Wav2Vec2 feature extractor (raw-waveform normalization, padding, truncation)."""
import itertools
import random
import unittest

import numpy as np

from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random 2-D nested list of floats in [0, scale) with the given (rows, cols) shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    """Helper that produces feature-extractor kwargs and synthetic speech inputs."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between successive input lengths when inputs must increase in size.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    # Attribute consumed by SequenceFeatureExtractionTestMixin.
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        """Assert per-feature zero mean / unit variance within a small tolerance."""
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float64)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
322
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __snake_case ( _lowercase): snake_case__ : List[str] = "unispeech" def __init__( self : List[str] , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : str=7_6_8 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=1_2 , __lowerCAmelCase : int=3_0_7_2 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Dict=1E-5 , __lowerCAmelCase : Optional[int]="group" , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __lowerCAmelCase : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase : Union[str, Any]=(1_0, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : List[str]=1_2_8 , __lowerCAmelCase : Any=1_6 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Union[str, Any]=0.05 , __lowerCAmelCase : Union[str, Any]=1_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Optional[int]=1_0 , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : List[str]=3_2_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Tuple=1_0_0 , __lowerCAmelCase : Dict=2_5_6 , __lowerCAmelCase : str=2_5_6 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict="mean" , __lowerCAmelCase : Union[str, Any]=False , 
__lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[Any]=2_5_6 , __lowerCAmelCase : Dict=8_0 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Any=0.5 , **__lowerCAmelCase : Optional[Any] , ): """simple docstring""" super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase ) _lowerCamelCase : Dict = hidden_size _lowerCamelCase : Any = feat_extract_norm _lowerCamelCase : List[Any] = feat_extract_activation _lowerCamelCase : Any = list(__lowerCAmelCase ) _lowerCamelCase : Tuple = list(__lowerCAmelCase ) _lowerCamelCase : int = list(__lowerCAmelCase ) _lowerCamelCase : List[str] = conv_bias _lowerCamelCase : List[str] = num_conv_pos_embeddings _lowerCamelCase : Tuple = num_conv_pos_embedding_groups _lowerCamelCase : List[str] = len(self.conv_dim ) _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : List[Any] = intermediate_size _lowerCamelCase : Dict = hidden_act _lowerCamelCase : Union[str, Any] = num_attention_heads _lowerCamelCase : Tuple = hidden_dropout _lowerCamelCase : List[Any] = attention_dropout _lowerCamelCase : Optional[int] = activation_dropout _lowerCamelCase : Optional[Any] = feat_proj_dropout _lowerCamelCase : Optional[int] = final_dropout _lowerCamelCase : Any = layerdrop _lowerCamelCase : Any = layer_norm_eps _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : List[str] = num_ctc_classes _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Optional[Any] = do_stable_layer_norm _lowerCamelCase : Tuple = use_weighted_layer_sum _lowerCamelCase : List[Any] = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCamelCase : Any = apply_spec_augment _lowerCamelCase : Dict = mask_time_prob _lowerCamelCase : List[str] = mask_time_length _lowerCamelCase : Optional[Any] = mask_time_min_masks _lowerCamelCase : List[str] = mask_feature_prob _lowerCamelCase : int = mask_feature_length _lowerCamelCase : Dict = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _lowerCamelCase : Optional[Any] = num_codevectors_per_group _lowerCamelCase : int = num_codevector_groups _lowerCamelCase : List[Any] = contrastive_logits_temperature _lowerCamelCase : List[str] = feat_quantizer_dropout _lowerCamelCase : Dict = num_negatives _lowerCamelCase : Optional[int] = codevector_dim _lowerCamelCase : List[Any] = proj_codevector_dim _lowerCamelCase : List[Any] = diversity_loss_weight # ctc loss _lowerCamelCase : Union[str, Any] = ctc_loss_reduction _lowerCamelCase : Any = ctc_zero_infinity # pretraining loss _lowerCamelCase : str = replace_prob @property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
72
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase : Any = { '''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Union[str, Any] = [ '''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PegasusXForConditionalGeneration''', '''PegasusXModel''', '''PegasusXPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys __UpperCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
106
"""simple docstring""" from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def snake_case_ ( A_ : str, A_ : str, A_ : Optional[str] = None ): '''simple docstring''' if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release: # old versions of hfh don't url-encode the file path _lowerCamelCase : Optional[Any] = quote(A_ ) return hfh.hf_hub_url(A_, A_, repo_type='''dataset''', revision=A_ )
72
0
"""PoolFormer model configuration."""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class PoolFormerConfig(PretrainedConfig):
    """Configuration for a PoolFormer model; defaults reproduce the ``poolformer_s12`` architecture."""

    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    """ONNX export configuration for PoolFormer."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single image input with dynamic batch/channel/spatial axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating exported outputs against PyTorch.
        return 2e-3
37
"""simple docstring""" import unittest import numpy as np def snake_case_ ( A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray | None = None, ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = np.shape(A_ ) _lowerCamelCase : List[str] = np.shape(A_ ) _lowerCamelCase : List[str] = np.shape(A_ ) if shape_a[0] != shape_b[0]: _lowerCamelCase : Tuple = ( '''Expected the same number of rows for A and B. ''' F'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(A_ ) if shape_b[1] != shape_c[1]: _lowerCamelCase : Tuple = ( '''Expected the same number of columns for B and C. ''' F'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(A_ ) _lowerCamelCase : List[str] = pseudo_inv if a_inv is None: try: _lowerCamelCase : Any = np.linalg.inv(A_ ) except np.linalg.LinAlgError: raise ValueError( '''Input matrix A is not invertible. Cannot compute Schur complement.''' ) return mat_c - mat_b.T @ a_inv @ mat_b class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : List[str] = np.array([[2, 1], [6, 3]] ) _lowerCamelCase : List[Any] = schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Dict = np.block([[a, b], [b.T, c]] ) _lowerCamelCase : Tuple = np.linalg.det(__lowerCAmelCase ) _lowerCamelCase : List[str] = np.linalg.det(__lowerCAmelCase ) _lowerCamelCase : Any = np.linalg.det(__lowerCAmelCase ) self.assertAlmostEqual(__lowerCAmelCase , det_a * det_s ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : int = np.array([[2, 1], [6, 3]] ) with 
self.assertRaises(__lowerCAmelCase ): schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : Union[str, Any] = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(__lowerCAmelCase ): schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
72
0
"""Lazy import structure for the Swin Transformer V2 model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map submodule name -> public names it exports; torch-only entries are
# added below when torch is importable.
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; mirrors _import_structure.
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; attributes resolve on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
14
"""simple docstring""" def snake_case_ ( A_ : list[int], A_ : str ): '''simple docstring''' _lowerCamelCase : Tuple = int(A_ ) # Initialize Result _lowerCamelCase : Dict = [] # Traverse through all denomination for denomination in reversed(A_ ): # Find denominations while int(A_ ) >= int(A_ ): total_value -= int(A_ ) answer.append(A_ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": lowerCAmelCase__ = [] lowerCAmelCase__ = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): lowerCAmelCase__ = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(F"""Denomination {i}: """).strip())) lowerCAmelCase__ = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter lowerCAmelCase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2000] lowerCAmelCase__ = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(F"""Following is minimal change for {value}: """) lowerCAmelCase__ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
72
0
"""Pigeonhole sort for lists of integers."""
from __future__ import annotations


def pigeon_sort(array: list[int]) -> list[int]:
    """Sort *array* in place with pigeonhole sort and return it.

    Runs in O(n + range) time and O(range) extra space, where
    range = max(array) - min(array) + 1. Suited to dense integer data.

    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> pigeon_sort([])
    []
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
141
"""simple docstring""" def snake_case_ ( A_ : int = 2_00_00_00 ): '''simple docstring''' _lowerCamelCase : int = [0 for i in range(n + 1 )] _lowerCamelCase : List[str] = 1 _lowerCamelCase : Any = 1 for i in range(2, int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i, n + 1, A_ ): _lowerCamelCase : str = 1 _lowerCamelCase : Tuple = 0 for i in range(A_ ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(F"""{solution() = }""")
72
0
"""Tests for generation stopping criteria (max length / new tokens / wall-clock time)."""
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        """Build a (batch=3, length) batch of random token ids plus uniform scores."""
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length

        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        # start_length + max_new_tokens must surface as the list's max_length.
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        # Backdating the initial timestamp forces the time budget to be exceeded.
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
181
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def snake_case_ ( A_ : Any ): '''simple docstring''' _lowerCamelCase : Any = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(A_, A_ ) def snake_case_ ( A_ : Union[str, Any] ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : Tuple = emb.weight.shape _lowerCamelCase : Dict = nn.Linear(A_, A_, bias=A_ ) _lowerCamelCase : str = emb.weight.data return lin_layer def snake_case_ ( A_ : str, A_ : Optional[int]="facebook/mbart-large-en-ro", A_ : Union[str, Any]=False, A_ : List[str]=False ): '''simple docstring''' _lowerCamelCase : Tuple = torch.load(A_, map_location='''cpu''' )['''model'''] remove_ignore_keys_(A_ ) _lowerCamelCase : int = state_dict['''encoder.embed_tokens.weight'''].shape[0] _lowerCamelCase : Any = MBartConfig.from_pretrained(A_, vocab_size=A_ ) if mbart_aa and finetuned: _lowerCamelCase : Any = '''relu''' _lowerCamelCase : Optional[int] = state_dict['''decoder.embed_tokens.weight'''] _lowerCamelCase : Any = MBartForConditionalGeneration(A_ ) model.model.load_state_dict(A_ ) if finetuned: _lowerCamelCase : str = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model 
is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
72
0
"""Deprecation helper mirroring ``diffusers.utils.deprecation_utils.deprecate``."""
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def UpperCAmelCase_(
    *args,
    take_from: Optional[Union[Dict, Any]] = None,
    standard_warn: bool = True,
    stacklevel: int = 2,
):
    """Warn about (and optionally consume) deprecated arguments/attributes.

    Each positional entry is an ``(attribute, version_name, message)`` tuple.
    If ``take_from`` is a dict of kwargs, the deprecated key is popped and its
    value returned; if it is an object, the attribute's value is returned.
    Raises ValueError once the library version reaches ``version_name``, and
    TypeError if unexpected deprecated kwargs remain afterwards.

    NOTE(review): the original signature reused one mangled name for all four
    parameters (a SyntaxError) and referenced undefined ``A_`` throughout;
    parameter names are restored to the upstream diffusers API.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    # Allow calling with a single bare tuple instead of *tuples.
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        # Point the error at the caller's frame for a useful message.
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
223
"""simple docstring""" def snake_case_ ( A_ : list[list] ): '''simple docstring''' _lowerCamelCase : Optional[int] = current_set.copy() for row_index, row in enumerate(A_ ): _lowerCamelCase : Tuple = row[0] for column_index, column in enumerate(A_ ): if magnitude == 0: _lowerCamelCase : List[Any] = column continue _lowerCamelCase : List[Any] = column / magnitude # Subtract to cancel term _lowerCamelCase : Union[str, Any] = current_set[0] _lowerCamelCase : Dict = [first_row] _lowerCamelCase : str = current_set[1::] for row in current_set: _lowerCamelCase : Union[str, Any] = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(A_ ) continue for column_index in range(len(A_ ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(A_ ) # Create next recursion iteration set if len(final_set[0] ) != 3: _lowerCamelCase : Any = final_set[0] _lowerCamelCase : Any = [] _lowerCamelCase : Optional[int] = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) _lowerCamelCase : Dict = simplify(A_ ) for i in range(len(A_ ) ): resultant[i].insert(0, current_first_column[i] ) resultant.insert(0, A_ ) _lowerCamelCase : Tuple = resultant return final_set def snake_case_ ( A_ : list[list] ): '''simple docstring''' if len(A_ ) == 0: raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) _lowerCamelCase : Dict = len(A_ ) + 1 if any(len(A_ ) != _length for item in equations ): raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) for row in equations: if any(not isinstance(A_, (int, float) ) for column in row ): raise ValueError('''solve_simultaneous() requires lists of integers''' ) if len(A_ ) == 1: return [equations[0][-1] / equations[0][0]] _lowerCamelCase : Optional[Any] = equations.copy() if any(0 in row for row in data_set ): _lowerCamelCase : str = data_set.copy() _lowerCamelCase : List[Any] = [] for row_index, 
row in enumerate(A_ ): if 0 not in row: _lowerCamelCase : Union[str, Any] = data_set.pop(A_ ) break if not full_row: raise ValueError('''solve_simultaneous() requires at least 1 full equation''' ) data_set.insert(0, A_ ) _lowerCamelCase : List[str] = data_set.copy() _lowerCamelCase : int = simplify(A_ ) _lowerCamelCase : int = simplified[::-1] _lowerCamelCase : list = [] for row in simplified: _lowerCamelCase : Tuple = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue _lowerCamelCase : Optional[Any] = row.copy()[: len(A_ ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(A_ ) == 0: solutions.append(0 ) continue _lowerCamelCase : Tuple = temp_row[1::] _lowerCamelCase : Tuple = temp_row[::-1] for column_index, column in enumerate(A_ ): current_solution -= column * solutions[column_index] solutions.append(A_ ) _lowerCamelCase : Optional[int] = [] for item in solutions: final.append(float(round(A_, 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase__ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
72
0
"""simple docstring""" from math import isqrt, loga def UpperCamelCase ( _lowerCAmelCase : int ) -> Dict: _UpperCAmelCase : Optional[Any] = [True] * max_number for i in range(2, isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2, A_, A_ ): _UpperCAmelCase : str = False return [i for i in range(2, A_ ) if is_prime[i]] def UpperCamelCase ( _lowerCAmelCase : int = 800800, _lowerCAmelCase : int = 800800 ) -> Tuple: _UpperCAmelCase : Dict = degree * loga(A_ ) _UpperCAmelCase : Any = int(A_ ) _UpperCAmelCase : List[Any] = calculate_prime_numbers(A_ ) _UpperCAmelCase : Dict = 0 _UpperCAmelCase : Dict = 0 _UpperCAmelCase : str = len(A_ ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(F'''{solution() = }''')
246
"""simple docstring""" import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class __snake_case ( _lowercase): snake_case__ : List[Any] = "Speech2TextFeatureExtractor" snake_case__ : Union[str, Any] = "Speech2TextTokenizer" def __init__( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" super().__init__(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : List[str] = self.feature_extractor _lowerCamelCase : str = False def __call__( self : List[Any] , *__lowerCAmelCase : int , **__lowerCAmelCase : List[str] ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) _lowerCamelCase : str = kwargs.pop('''raw_speech''' ) else: _lowerCamelCase : Tuple = kwargs.pop('''audio''' , __lowerCAmelCase ) _lowerCamelCase : Optional[Any] = kwargs.pop('''sampling_rate''' , __lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = kwargs.pop('''text''' , __lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: _lowerCamelCase : List[Any] = args[0] _lowerCamelCase : int = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: _lowerCamelCase : List[Any] = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase ) if text is not None: _lowerCamelCase : List[Any] = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase ) if text is None: return inputs elif audio is None: return encodings else: _lowerCamelCase : List[str] = encodings['''input_ids'''] return inputs def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Tuple ): """simple docstring""" return 
self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : int ): """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @contextmanager def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) _lowerCamelCase : Union[str, Any] = True _lowerCamelCase : Any = self.tokenizer yield _lowerCamelCase : List[str] = self.feature_extractor _lowerCamelCase : Tuple = False
72
0
"""Distributed `Trainer` smoke test: verifies evaluation/prediction return all
samples in order when run under torch.distributed."""
from typing import Dict

from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    get_torch_dist_unique_port,
    require_torch_multi_gpu,
    require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging


logger = logging.get_logger(__name__)


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    from transformers import Trainer

    class DummyDataset(Dataset):
        """Dataset whose i-th element is simply the integer ``i``."""

        def __init__(self, length: int = 101):
            self.length = length

        def __len__(self):
            return self.length

        def __getitem__(self, i):
            return i

    class DummyDataCollator:
        """Collate a list of ints into ``input_ids``/``labels`` tensors."""

        def __call__(self, features):
            return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}

    class DummyModel(nn.Module):
        """Identity model; returns a dummy loss plus the inputs when labels are given."""

        def __init__(self):
            super().__init__()
            # Add some (unused) params otherwise DDP will complain.
            self.fc = nn.Linear(120, 80)

        def forward(self, input_ids, labels=None):
            if labels is not None:
                return torch.tensor(0.0, device=input_ids.device), input_ids
            else:
                return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = (
            f"--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} "
            f"{self.test_file_dir}/test_trainer_distributed.py"
        ).split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = (
            f"--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} "
            f"{self.test_file_dir}/test_trainer_distributed.py"
        ).split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        # Re-run with gradient-free accumulation enabled.
        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
10
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''ReformerTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''ReformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ReformerAttention''', '''ReformerForMaskedLM''', '''ReformerForQuestionAnswering''', '''ReformerForSequenceClassification''', '''ReformerLayer''', '''ReformerModel''', '''ReformerModelWithLMHead''', '''ReformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = 
_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
0
"""Tests for the Flax VisionTextDualEncoder models (ViT/CLIP vision + BERT text)."""
import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_flax_cross_test,
    require_flax,
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available

from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester


if is_flax_available():
    from transformers import (
        FlaxBertModel,
        FlaxCLIPVisionModel,
        FlaxVisionTextDualEncoderModel,
        FlaxViTModel,
        VisionTextDualEncoderConfig,
        VisionTextDualEncoderProcessor,
    )
    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )

if is_torch_available():
    import torch

    from transformers import VisionTextDualEncoderModel

if is_vision_available():
    from PIL import Image


def to_atuple(x):
    """Return ``x`` unchanged if it is iterable, otherwise the pair ``(x, x)``."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


@require_flax
class VisionTextDualEncoderMixin:
    # NOTE(review): in the original all four classes of this file shared one
    # mangled name (later defs overwrote earlier ones) and method signatures
    # reused a single parameter name; names are restored here.

    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a, b, tol):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, fx_model, pt_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(fx_model, pt_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(fx_model, pt_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)


@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2_28_47_27, 0.3_10_41_22]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
90
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = { '''configuration_autoformer''': [ '''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AutoformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''AutoformerForPrediction''', '''AutoformerModel''', '''AutoformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
0
from importlib import import_module

from .logging import get_logger


# Module-level logger. Fix: the original annotated this binding as `List[str]` — a type
# that is neither correct nor imported, raising NameError when the module was executed.
a__ = get_logger(__name__)


class _PatchedModuleObj:
    """Set all the module's components as attributes of the _PatchedModuleObj object.

    Fix vs. the corrupted original: this class had been renamed to the same name as the
    patcher class below (so `isinstance(..., _PatchedModuleObj)` raised NameError), and
    its parameters (`__lowerCamelCase`) did not match the names used in the body
    (`__lowerCAmelCase`) — both of which are name-mangled inside a class body.
    """

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            # Copy over requested attrs plus all non-dunder attributes of the module.
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        # Keep a handle on the real (unpatched) module so nested patches can compare/unwind.
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class SCREAMING_SNAKE_CASE__:
    """
    Patch a submodule attribute of an object (e.g. ``"os.path.join"`` or a builtin like
    ``"open"``), handling the different ways the attribute may have been imported
    (``import os``, ``from os import path``, ``from os.path import join``, renamed
    imports, ...). Restores the original attributes on exit.
    """

    # Patches activated via start() and not yet stopped. Fix: the original declared this
    # as `__SCREAMING_SNAKE_CASE` while methods read `self._active_patches`.
    _active_patches = []

    def __init__(self, obj, target, new, attrs=None):
        self.obj = obj  # object whose attributes get patched
        self.target = target  # dotted path, e.g. "os.path.join"
        self.new = new  # replacement value
        self.key = target.split(".")[0]
        self.original = {}  # attr name -> original value, used to restore on exit
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        # Restore every attribute recorded in __enter__.
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch outside a ``with`` block. Fix: both this method and stop()
        were named `UpperCamelCase` in the corrupted original, so this one was shadowed."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch; no-op if it was never started."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
193
"""simple docstring""" import math def snake_case_ ( A_ : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5, int(math.sqrt(A_ ) + 1 ), 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def snake_case_ ( A_ : float = 0.1 ): '''simple docstring''' _lowerCamelCase : Optional[int] = 3 _lowerCamelCase : List[str] = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1 ): primes += is_prime(A_ ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
72
0
from pathlib import Path


def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first *n* (rstripped) lines of every file in *src_dir* to *dest_dir*.

    Fixes vs. the corrupted original: the function declared three parameters all named
    `SCREAMING_SNAKE_CASE` (a SyntaxError), its body read the undefined name `A_`, and
    the CLI entry point called the undefined name `minify` while the function was `_a`.
    Also closes the source file deliberately (read_text) instead of leaking the handle
    from a bare `path.open()`.
    """
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [line.rstrip() for line in path.read_text().splitlines()][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.write_text("\n".join(new))


# Backward-compatible alias for the original (obfuscated) name.
_a = minify


if __name__ == "__main__":
    # `fire` is only needed for the CLI, so import it lazily: the module stays importable
    # (and testable) in environments without the optional dependency.
    import fire

    fire.Fire(minify)
322
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _lowerCamelCase : Union[str, Any] = TextStreamer(__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCamelCase : int = cs.out[:-1] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase , 
max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : List[str] = tokenizer.decode(greedy_ids[0] ) _lowerCamelCase : Tuple = TextIteratorStreamer(__lowerCAmelCase ) _lowerCamelCase : Tuple = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase ) thread.start() _lowerCamelCase : int = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : int = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = greedy_ids[:, input_ids.shape[1] :] _lowerCamelCase : int = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _lowerCamelCase : Any = TextStreamer(__lowerCAmelCase , skip_prompt=__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCamelCase : Union[str, Any] = cs.out[:-1] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''distilgpt2''' ) _lowerCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : str = -1 _lowerCamelCase : Any = torch.ones((1, 5) , device=__lowerCAmelCase 
).long() * model.config.bos_token_id with CaptureStdout() as cs: _lowerCamelCase : List[Any] = TextStreamer(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _lowerCamelCase : Any = cs.out[:-1] # Remove the final "\n" _lowerCamelCase : int = tokenizer(__lowerCAmelCase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = -1 _lowerCamelCase : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : List[str] = TextIteratorStreamer(__lowerCAmelCase , timeout=0.0_01 ) _lowerCamelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__lowerCAmelCase ): _lowerCamelCase : Optional[Any] = '''''' for new_text in streamer: streamer_text += new_text
72
0
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self : List[str] ): lowerCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) lowerCAmelCase__ : Tuple = -1 lowerCAmelCase__ : List[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) lowerCAmelCase__ : List[Any] = model.generate(__lowerCAmelCase ,max_new_tokens=1_0 ,do_sample=__lowerCAmelCase ) lowerCAmelCase__ : Optional[Any] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: lowerCAmelCase__ : Union[str, Any] = TextStreamer(__lowerCAmelCase ) model.generate(__lowerCAmelCase ,max_new_tokens=1_0 ,do_sample=__lowerCAmelCase ,streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCAmelCase__ : int = cs.out[:-1] self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase ) def __lowerCAmelCase ( self : Dict ): lowerCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase__ : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) lowerCAmelCase__ : Tuple = -1 lowerCAmelCase__ : List[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) lowerCAmelCase__ : Optional[int] = model.generate(__lowerCAmelCase ,max_new_tokens=1_0 
,do_sample=__lowerCAmelCase ) lowerCAmelCase__ : List[str] = tokenizer.decode(greedy_ids[0] ) lowerCAmelCase__ : Tuple = TextIteratorStreamer(__lowerCAmelCase ) lowerCAmelCase__ : Tuple = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} lowerCAmelCase__ : List[Any] = Thread(target=model.generate ,kwargs=__lowerCAmelCase ) thread.start() lowerCAmelCase__ : int = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase ) def __lowerCAmelCase ( self : str ): lowerCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase__ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) lowerCAmelCase__ : Tuple = -1 lowerCAmelCase__ : Optional[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) lowerCAmelCase__ : int = model.generate(__lowerCAmelCase ,max_new_tokens=1_0 ,do_sample=__lowerCAmelCase ) lowerCAmelCase__ : Optional[Any] = greedy_ids[:, input_ids.shape[1] :] lowerCAmelCase__ : int = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: lowerCAmelCase__ : Any = TextStreamer(__lowerCAmelCase ,skip_prompt=__lowerCAmelCase ) model.generate(__lowerCAmelCase ,max_new_tokens=1_0 ,do_sample=__lowerCAmelCase ,streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCAmelCase__ : Union[str, Any] = cs.out[:-1] self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase ) def __lowerCAmelCase ( self : int ): lowerCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''distilgpt2''' ) lowerCAmelCase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__lowerCAmelCase ) lowerCAmelCase__ : str = -1 lowerCAmelCase__ : Any = torch.ones((1, 5) ,device=__lowerCAmelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: 
lowerCAmelCase__ : List[Any] = TextStreamer(__lowerCAmelCase ,skip_special_tokens=__lowerCAmelCase ) model.generate(__lowerCAmelCase ,max_new_tokens=1 ,do_sample=__lowerCAmelCase ,streamer=__lowerCAmelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token lowerCAmelCase__ : Any = cs.out[:-1] # Remove the final "\n" lowerCAmelCase__ : int = tokenizer(__lowerCAmelCase ,return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) ) def __lowerCAmelCase ( self : Tuple ): lowerCAmelCase__ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase__ : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) lowerCAmelCase__ : Union[str, Any] = -1 lowerCAmelCase__ : Any = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) lowerCAmelCase__ : List[str] = TextIteratorStreamer(__lowerCAmelCase ,timeout=0.001 ) lowerCAmelCase__ : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} lowerCAmelCase__ : List[Any] = Thread(target=model.generate ,kwargs=__lowerCAmelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__lowerCAmelCase ): lowerCAmelCase__ : Optional[Any] = '''''' for new_text in streamer: streamer_text += new_text
106
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase__ = logging.get_logger(__name__) # TODO: upload to AWS lowerCAmelCase__ = { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json''' ), } class __snake_case ( _lowercase): snake_case__ : int = "retribert" def __init__( self : Optional[int] , __lowerCAmelCase : str=3_0_5_2_2 , __lowerCAmelCase : Tuple=7_6_8 , __lowerCAmelCase : Union[str, Any]=8 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=3_0_7_2 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=5_1_2 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Optional[Any]=1E-12 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any=1_2_8 , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : str , ): """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) _lowerCamelCase : Dict = vocab_size _lowerCamelCase : Union[str, Any] = hidden_size _lowerCamelCase : Dict = num_hidden_layers _lowerCamelCase : int = num_attention_heads _lowerCamelCase : int = hidden_act _lowerCamelCase : str = intermediate_size _lowerCamelCase : Union[str, Any] = hidden_dropout_prob _lowerCamelCase : List[Any] = attention_probs_dropout_prob _lowerCamelCase : Optional[int] = max_position_embeddings _lowerCamelCase : List[Any] = type_vocab_size _lowerCamelCase : Any = initializer_range _lowerCamelCase : Optional[int] = layer_norm_eps _lowerCamelCase : int = share_encoders _lowerCamelCase : Optional[Any] = projection_dim
72
0
'''simple docstring'''
# Tests for the diffusers ScoreSdeVePipeline (fast tiny-model test + slow checkpoint test).
# NOTE(review): this chunk appears machine-renamed: locals are bound to `lowerCAmelCase__`
# while later statements read names like `model`, `sde_ve`, `image`, `expected_slice`,
# and call arguments reference the unbound (mangled) name __lowerCAmelCase — executing it
# would raise NameError. Both classes also share the name `lowerCAmelCase_`, so the slow
# class shadows the fast one at module scope. Confirm against the original diffusers
# test file before relying on these tests.

import unittest

import numpy as np
import torch

from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class lowerCAmelCase_( unittest.TestCase ):
    '''simple docstring'''

    @property
    def UpperCAmelCase_ ( self ) -> List[Any]:
        # Tiny deterministic UNet2D used by the fast pipeline test below.
        torch.manual_seed(0 )
        lowerCAmelCase__ : str = UNetaDModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
        return model

    def UpperCAmelCase_ ( self ) -> List[Any]:
        # Runs the SDE-VE pipeline for 2 steps with a fixed seed and checks output shape
        # and a 3x3 corner slice for both the dict and tuple return paths.
        lowerCAmelCase__ : Optional[int] = self.dummy_uncond_unet
        lowerCAmelCase__ : str = ScoreSdeVeScheduler()
        lowerCAmelCase__ : Union[str, Any] = ScoreSdeVePipeline(unet=__lowerCAmelCase ,scheduler=__lowerCAmelCase )
        sde_ve.to(__lowerCAmelCase )
        sde_ve.set_progress_bar_config(disable=__lowerCAmelCase )
        lowerCAmelCase__ : int = torch.manual_seed(0 )
        lowerCAmelCase__ : Union[str, Any] = sde_ve(num_inference_steps=2 ,output_type="""numpy""" ,generator=__lowerCAmelCase ).images
        lowerCAmelCase__ : int = torch.manual_seed(0 )
        lowerCAmelCase__ : Dict = sde_ve(num_inference_steps=2 ,output_type="""numpy""" ,generator=__lowerCAmelCase ,return_dict=__lowerCAmelCase )[
            0
        ]
        lowerCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
        lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        lowerCAmelCase__ : int = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2


@slow
@require_torch
class lowerCAmelCase_( unittest.TestCase ):
    '''simple docstring'''

    def UpperCAmelCase_ ( self ) -> Tuple:
        # Slow smoke test with the full google/ncsnpp-church-256 checkpoint: 10 steps,
        # then checks output shape and a 3x3 corner slice against reference values.
        lowerCAmelCase__ : Union[str, Any] = '''google/ncsnpp-church-256'''
        lowerCAmelCase__ : int = UNetaDModel.from_pretrained(__lowerCAmelCase )
        lowerCAmelCase__ : List[str] = ScoreSdeVeScheduler.from_pretrained(__lowerCAmelCase )
        lowerCAmelCase__ : Any = ScoreSdeVePipeline(unet=__lowerCAmelCase ,scheduler=__lowerCAmelCase )
        sde_ve.to(__lowerCAmelCase )
        sde_ve.set_progress_bar_config(disable=__lowerCAmelCase )
        lowerCAmelCase__ : str = torch.manual_seed(0 )
        lowerCAmelCase__ : int = sde_ve(num_inference_steps=10 ,output_type="""numpy""" ,generator=__lowerCAmelCase ).images
        lowerCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        lowerCAmelCase__ : str = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
37
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 
'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : Optional[int] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : str = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] _lowerCamelCase : Optional[Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : Tuple = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] _lowerCamelCase : Any = '''fp16''' 
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] _lowerCamelCase : str = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] _lowerCamelCase : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : int = '''fp16''' self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
72
0
import importlib import inspect import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py _lowerCamelCase : Union[str, Any] = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. _lowerCamelCase : Any = importlib.util.spec_from_file_location( """transformers""", os.path.join(PATH_TO_TRANSFORMERS, """__init__.py"""), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) _lowerCamelCase : List[str] = spec.loader.load_module() _lowerCamelCase : List[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` _lowerCamelCase : str = re.compile("""\[(.+?)\]\((https://huggingface\.co/.+?)\)""") _lowerCamelCase : Optional[int] = { """CLIPConfigMixin""", """DecisionTransformerConfigMixin""", """EncoderDecoderConfigMixin""", """RagConfigMixin""", """SpeechEncoderDecoderConfigMixin""", """VisionEncoderDecoderConfigMixin""", """VisionTextDualEncoderConfigMixin""", } def SCREAMING_SNAKE_CASE ( ) -> List[str]: """simple docstring""" A__ = [] for config_class in list(CONFIG_MAPPING.values() ): A__ = False # source code of `config_class` A__ = inspect.getsource(A_ ) A__ = _re_checkpoint.findall(A_ ) for checkpoint in checkpoints: # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. 
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` A__ = checkpoint # verify the checkpoint name corresponds to the checkpoint link A__ = f"""https://huggingface.co/{ckpt_name}""" if ckpt_link == ckpt_link_from_name: A__ = True break A__ = config_class.__name__ if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(A_ ) if len(A_ ) > 0: A__ = '''\n'''.join(sorted(A_ ) ) raise ValueError(f"""The following configurations don\'t contain any valid checkpoint:\n{message}""" ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
14
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class __snake_case : def __init__( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]=1_0 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=3_2 * 4 , __lowerCAmelCase : Dict=3_2 * 6 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[str]=3_2 , ): """simple docstring""" _lowerCamelCase : List[str] = parent _lowerCamelCase : str = batch_size _lowerCamelCase : Dict = is_training _lowerCamelCase : str = use_auxiliary_loss _lowerCamelCase : Any = num_queries _lowerCamelCase : List[Any] = num_channels _lowerCamelCase : int = min_size _lowerCamelCase : Any = max_size _lowerCamelCase : int = num_labels _lowerCamelCase : List[str] = mask_feature_size def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __lowerCAmelCase ) _lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, 
self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5 ).float() _lowerCamelCase : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long() _lowerCamelCase : Dict = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = self.prepare_config_and_inputs() _lowerCamelCase : List[str] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ): """simple docstring""" _lowerCamelCase : List[str] = output.encoder_hidden_states _lowerCamelCase : Tuple = output.pixel_decoder_hidden_states _lowerCamelCase : Dict = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers ) def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=False ): """simple docstring""" with torch.no_grad(): _lowerCamelCase : Optional[int] = MaskFormerModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() 
_lowerCamelCase : Tuple = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) _lowerCamelCase : List[str] = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" _lowerCamelCase : str = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() def comm_check_on_output(__lowerCAmelCase : Dict ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _lowerCamelCase : str = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ) 
comm_check_on_output(__lowerCAmelCase ) _lowerCamelCase : List[str] = model( pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __snake_case ( _lowercase , _lowercase , unittest.TestCase): snake_case__ : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () snake_case__ : Any = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) snake_case__ : List[str] = False snake_case__ : List[str] = False snake_case__ : Optional[int] = False snake_case__ : Dict = False def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Optional[int] = MaskFormerModelTester(self ) _lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple 
docstring""" pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : str = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Dict = [*signature.parameters.keys()] _lowerCamelCase : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" for model_name in ["facebook/maskformer-swin-small-coco"]: _lowerCamelCase : Union[str, Any] = MaskFormerModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = (self.model_tester.min_size,) * 2 _lowerCamelCase : Union[str, Any] = { '''pixel_values''': torch.randn((2, 3, *size) , device=__lowerCAmelCase ), '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowerCAmelCase ), '''class_labels''': torch.zeros(2 , 1_0 , device=__lowerCAmelCase ).long(), } _lowerCamelCase : int = MaskFormerForInstanceSegmentation(MaskFormerConfig() 
).to(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ).to(__lowerCAmelCase ) _lowerCamelCase : List[str] = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase ) self.assertTrue(outputs.attentions is not None ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _lowerCamelCase : Union[str, Any] = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : Any = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() _lowerCamelCase : List[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : int = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[int] = True _lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , 
mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) _lowerCamelCase : List[Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCamelCase : Tuple = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _lowerCamelCase : List[str] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCamelCase : Optional[int] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__lowerCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) lowerCAmelCase__ = 1E-4 def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class __snake_case ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : str = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowerCAmelCase ) _lowerCamelCase : Any = self.default_image_processor _lowerCamelCase : List[Any] = prepare_img() _lowerCamelCase : Any = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : Any = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : int = model(**__lowerCAmelCase ) 
_lowerCamelCase : str = torch.tensor( [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) _lowerCamelCase : Union[str, Any] = torch.tensor( [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) _lowerCamelCase : Optional[int] = torch.tensor( [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[int] = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : str = prepare_img() _lowerCamelCase : int = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : str = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : Optional[int] = model(**__lowerCAmelCase ) # masks_queries_logits _lowerCamelCase : Optional[int] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCamelCase : List[str] = [ [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33], [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95], 
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42], ] _lowerCamelCase : Any = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits _lowerCamelCase : List[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCamelCase : str = torch.tensor( [ [1.6512E00, -5.2572E00, -3.3519E00], [3.6169E-02, -5.9025E00, -2.9313E00], [1.0766E-04, -7.7630E00, -5.1263E00], ] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Any = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : Tuple = self.default_image_processor _lowerCamelCase : Tuple = prepare_img() _lowerCamelCase : Optional[Any] = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : List[Any] = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : Optional[Any] = model(**__lowerCAmelCase ) # masks_queries_logits _lowerCamelCase : List[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCamelCase : int = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]] _lowerCamelCase : List[Any] = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) 
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits _lowerCamelCase : Dict = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCamelCase : Any = torch.tensor( [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : str = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : List[str] = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , ) _lowerCamelCase : Union[str, Any] = inputs['''pixel_values'''].to(__lowerCAmelCase ) _lowerCamelCase : Dict = [el.to(__lowerCAmelCase ) for el in inputs['''mask_labels''']] _lowerCamelCase : Optional[Any] = [el.to(__lowerCAmelCase ) for el in inputs['''class_labels''']] with torch.no_grad(): _lowerCamelCase : Tuple = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None )
72
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase = { '''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''], '''tokenization_xlm''': ['''XLMTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ '''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMForMultipleChoice''', '''XLMForQuestionAnswering''', '''XLMForQuestionAnsweringSimple''', '''XLMForSequenceClassification''', '''XLMForTokenClassification''', '''XLMModel''', '''XLMPreTrainedModel''', '''XLMWithLMHeadModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ '''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXLMForMultipleChoice''', '''TFXLMForQuestionAnsweringSimple''', '''TFXLMForSequenceClassification''', '''TFXLMForTokenClassification''', '''TFXLMMainLayer''', '''TFXLMModel''', '''TFXLMPreTrainedModel''', '''TFXLMWithLMHeadModel''', ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, 
TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
141
"""simple docstring""" lowerCAmelCase__ = { '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } def snake_case_ ( A_ : dict, A_ : int, A_ : int ): '''simple docstring''' _lowerCamelCase : List[str] = set() # keep track of all the paths to be checked _lowerCamelCase : str = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue _lowerCamelCase : str = queue.pop(0 ) # get the last node from the path _lowerCamelCase : List[Any] = path[-1] if node not in explored: _lowerCamelCase : Union[str, Any] = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: _lowerCamelCase : Union[str, Any] = list(A_ ) new_path.append(A_ ) queue.append(A_ ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(A_ ) # in case there's no path between the 2 nodes return [] def snake_case_ ( A_ : dict, A_ : int, A_ : Dict ): '''simple docstring''' if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 _lowerCamelCase : Optional[int] = [start] _lowerCamelCase : int = set(A_ ) # Keep tab on distances from `start` node. 
_lowerCamelCase : int = {start: 0, target: -1} while queue: _lowerCamelCase : Optional[Any] = queue.pop(0 ) if node == target: _lowerCamelCase : Any = ( dist[node] if dist[target] == -1 else min(dist[target], dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(A_ ) queue.append(A_ ) _lowerCamelCase : Any = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
72
0
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = [ ('''bert.bert''', '''visual_bert'''), ('''bert.cls''', '''cls'''), ('''bert.classifier''', '''cls'''), ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''), ('''position_embeddings_visual''', '''visual_position_embeddings'''), ('''projection''', '''visual_projection'''), ] UpperCamelCase__ = [ '''nlvr2_coco_pre_trained.th''', '''nlvr2_fine_tuned.th''', '''nlvr2_pre_trained.th''', '''vcr_coco_pre_train.th''', '''vcr_fine_tune.th''', '''vcr_pre_train.th''', '''vqa_coco_pre_trained.th''', '''vqa_fine_tuned.th''', '''vqa_pre_trained.th''', ] def a__ ( lowerCAmelCase__ ) -> Union[str, Any]: UpperCAmelCase__ : Dict = torch.load(A_ , map_location='''cpu''' ) return sd def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=rename_keys_prefix ) -> Any: UpperCAmelCase__ : Optional[Any] = OrderedDict() UpperCAmelCase__ : Optional[int] = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue UpperCAmelCase__ : Optional[Any] = key for name_pair in rename_keys_prefix: UpperCAmelCase__ : Optional[Any] = new_key.replace(name_pair[0] , name_pair[1] ) UpperCAmelCase__ : Any = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately UpperCAmelCase__ : List[Any] = new_d['''cls.predictions.bias'''] return new_d @torch.no_grad() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]: assert ( checkpoint_path.split('''/''' )[-1] in 
ACCEPTABLE_CHECKPOINTS ), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.""" # Get Config if "pre" in checkpoint_path: UpperCAmelCase__ : List[str] = '''pretraining''' if "vcr" in checkpoint_path: UpperCAmelCase__ : List[str] = {'''visual_embedding_dim''': 5_12} elif "vqa_advanced" in checkpoint_path: UpperCAmelCase__ : str = {'''visual_embedding_dim''': 20_48} elif "vqa" in checkpoint_path: UpperCAmelCase__ : Union[str, Any] = {'''visual_embedding_dim''': 20_48} elif "nlvr" in checkpoint_path: UpperCAmelCase__ : Union[str, Any] = {'''visual_embedding_dim''': 10_24} else: raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" ) else: if "vcr" in checkpoint_path: UpperCAmelCase__ : Dict = {'''visual_embedding_dim''': 5_12} UpperCAmelCase__ : Tuple = '''multichoice''' elif "vqa_advanced" in checkpoint_path: UpperCAmelCase__ : Dict = {'''visual_embedding_dim''': 20_48} UpperCAmelCase__ : Dict = '''vqa_advanced''' elif "vqa" in checkpoint_path: UpperCAmelCase__ : Dict = {'''visual_embedding_dim''': 20_48, '''num_labels''': 31_29} UpperCAmelCase__ : Dict = '''vqa''' elif "nlvr" in checkpoint_path: UpperCAmelCase__ : Union[str, Any] = { '''visual_embedding_dim''': 10_24, '''num_labels''': 2, } UpperCAmelCase__ : Optional[int] = '''nlvr''' UpperCAmelCase__ : Any = VisualBertConfig(**A_ ) # Load State Dict UpperCAmelCase__ : Tuple = load_state_dict(A_ ) UpperCAmelCase__ : Union[str, Any] = get_new_dict(A_ , A_ ) if model_type == "pretraining": UpperCAmelCase__ : Union[str, Any] = VisualBertForPreTraining(A_ ) elif model_type == "vqa": UpperCAmelCase__ : Union[str, Any] = VisualBertForQuestionAnswering(A_ ) elif model_type == "nlvr": UpperCAmelCase__ : Any = VisualBertForVisualReasoning(A_ ) elif model_type == "multichoice": UpperCAmelCase__ : Dict = VisualBertForMultipleChoice(A_ ) model.load_state_dict(A_ ) # Save Checkpoints Path(A_ ).mkdir(exist_ok=A_ ) model.save_pretrained(A_ ) if __name__ == "__main__": UpperCamelCase__ = 
argparse.ArgumentParser() # Required parameters parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''') UpperCamelCase__ = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
181
"""simple docstring""" import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def snake_case_ ( A_ : Tuple, A_ : int, A_ : Dict ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = LxmertConfig.from_json_file(A_ ) print(F'''Building PyTorch model from configuration: {config}''' ) _lowerCamelCase : List[str] = LxmertForPreTraining(A_ ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(A_, A_, A_ ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict(), A_ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
72
0
'''simple docstring''' import inspect import os import sys import unittest import accelerate from accelerate.test_utils import execute_subprocess_async, require_tpu class a_ ( unittest.TestCase ): def lowercase__ ( self : Any ): """simple docstring""" lowercase_ :List[Any] = inspect.getfile(accelerate.test_utils ) lowercase_ :Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] ) lowercase_ :Any = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] ) @require_tpu def lowercase__ ( self : int ): """simple docstring""" lowercase_ :Tuple = F'\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n '.split() lowercase_ :List[Any] = [sys.executable] + distributed_args execute_subprocess_async(__lowerCAmelCase , env=os.environ.copy() )
223
"""simple docstring""" from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def snake_case_ ( ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : int = 9, 14 # noqa: F841 _lowerCamelCase : List[Any] = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] _lowerCamelCase : Any = defaultdict(A_ ) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost] ) adjancency[nodea].append([nodea, cost] ) _lowerCamelCase : List[str] = mst(A_ ) _lowerCamelCase : Optional[int] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: _lowerCamelCase : int = tuple(answer[:2] ) _lowerCamelCase : int = tuple(edge[::-1] ) assert edge in result or reverse in result
72
0
"""simple docstring""" import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _UpperCAmelCase : @staticmethod def __snake_case ( *_A , **_A ) -> Any: '''simple docstring''' pass @is_pipeline_test @require_torch @require_vision class _UpperCAmelCase ( unittest.TestCase): __a : Optional[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def __snake_case ( self , _A , _A , _A ) -> List[str]: '''simple docstring''' _UpperCAmelCase : Optional[int] = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" ) _UpperCAmelCase : Optional[Any] = [ { '''image''': Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), '''question''': '''How many cats are there?''', }, { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''question''': '''How many cats are there?''', }, ] return vqa_pipeline, examples def __snake_case ( self , _A , _A ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase : Tuple = vqa_pipeline(__lowerCAmelCase , top_k=1 ) self.assertEqual( __lowerCAmelCase , [ [{"""score""": ANY(__lowerCAmelCase ), """answer""": ANY(__lowerCAmelCase )}], [{"""score""": ANY(__lowerCAmelCase ), """answer""": ANY(__lowerCAmelCase )}], ] , ) @require_torch def __snake_case ( self ) -> int: '''simple docstring''' _UpperCAmelCase : List[Any] = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" ) _UpperCAmelCase : Optional[int] = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' _UpperCAmelCase : Tuple = '''How many cats are there?''' _UpperCAmelCase : Union[str, Any] = vqa_pipeline(image=__lowerCAmelCase , question="""How many 
cats are there?""" , top_k=2 ) self.assertEqual( __lowerCAmelCase , [{"""score""": ANY(__lowerCAmelCase ), """answer""": ANY(__lowerCAmelCase )}, {"""score""": ANY(__lowerCAmelCase ), """answer""": ANY(__lowerCAmelCase )}] ) _UpperCAmelCase : Dict = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 ) self.assertEqual( __lowerCAmelCase , [{"""score""": ANY(__lowerCAmelCase ), """answer""": ANY(__lowerCAmelCase )}, {"""score""": ANY(__lowerCAmelCase ), """answer""": ANY(__lowerCAmelCase )}] ) @slow @require_torch def __snake_case ( self ) -> Dict: '''simple docstring''' _UpperCAmelCase : Optional[int] = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""" ) _UpperCAmelCase : Any = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' _UpperCAmelCase : str = '''How many cats are there?''' _UpperCAmelCase : List[str] = vqa_pipeline(image=__lowerCAmelCase , question=__lowerCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase , decimals=4 ) , [{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] ) _UpperCAmelCase : List[Any] = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase , decimals=4 ) , [{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] ) _UpperCAmelCase : str = vqa_pipeline( [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 ) self.assertEqual( nested_simplify(__lowerCAmelCase , decimals=4 ) , [[{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}]] * 2 , ) @require_tf @unittest.skip("""Visual question answering not implemented in TF""" ) def __snake_case ( self ) -> Any: '''simple docstring''' pass
246
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all BART models at https://huggingface.co/models?filter=bart lowerCAmelCase__ = { '''vocab_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''', }, '''merges_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''facebook/bart-base''': 
'''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''', }, } lowerCAmelCase__ = { '''facebook/bart-base''': 1024, '''facebook/bart-large''': 1024, '''facebook/bart-large-mnli''': 1024, '''facebook/bart-large-cnn''': 1024, '''facebook/bart-large-xsum''': 1024, '''yjernite/bart_eli5''': 1024, } class __snake_case ( _lowercase): snake_case__ : Any = VOCAB_FILES_NAMES snake_case__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ : Optional[int] = ["input_ids", "attention_mask"] snake_case__ : Any = BartTokenizer def __init__( self : int , __lowerCAmelCase : Dict=None , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[Any]="replace" , __lowerCAmelCase : Any="<s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : str="</s>" , __lowerCAmelCase : Dict="<s>" , __lowerCAmelCase : Union[str, Any]="<unk>" , __lowerCAmelCase : Any="<pad>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=True , **__lowerCAmelCase : Optional[Any] , ): """simple docstring""" super().__init__( __lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , errors=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , 
pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase , **__lowerCAmelCase , ) _lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space: _lowerCamelCase : Dict = getattr(__lowerCAmelCase , pre_tok_state.pop('''type''' ) ) _lowerCamelCase : Any = add_prefix_space _lowerCamelCase : int = pre_tok_class(**__lowerCAmelCase ) _lowerCamelCase : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _lowerCamelCase : List[str] = '''post_processor''' _lowerCamelCase : List[str] = getattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase ) if tokenizer_component_instance: _lowerCamelCase : int = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowerCamelCase : Tuple = tuple(state['''sep'''] ) if "cls" in state: _lowerCamelCase : int = tuple(state['''cls'''] ) _lowerCamelCase : Union[str, Any] = False if state.get('''add_prefix_space''' , __lowerCAmelCase ) != add_prefix_space: _lowerCamelCase : Dict = add_prefix_space _lowerCamelCase : Optional[Any] = True if state.get('''trim_offsets''' , __lowerCAmelCase ) != trim_offsets: _lowerCamelCase : Any = trim_offsets _lowerCamelCase : str = True if changes_to_apply: _lowerCamelCase : List[str] = getattr(__lowerCAmelCase , state.pop('''type''' ) ) _lowerCamelCase : str = component_class(**__lowerCAmelCase ) setattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase ) @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE ( self : Any , 
__lowerCAmelCase : int ): """simple docstring""" _lowerCamelCase : Tuple = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else value _lowerCamelCase : str = value def SCREAMING_SNAKE_CASE ( self : int , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ): """simple docstring""" _lowerCamelCase : Dict = kwargs.get('''is_split_into_words''' , __lowerCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' '''to use it with pretokenized inputs.''' ) return super()._batch_encode_plus(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : List[Any] ): """simple docstring""" _lowerCamelCase : Any = kwargs.get('''is_split_into_words''' , __lowerCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' '''to use it with pretokenized inputs.''' ) return super()._encode_plus(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ): """simple docstring""" _lowerCamelCase : Tuple = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase ) return tuple(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=None ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ): """simple docstring""" _lowerCamelCase : List[str] = 
[self.sep_token_id] _lowerCamelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
72
0
import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device __A = False class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' pass @slow @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Tuple: '''simple docstring''' lowerCamelCase__: Optional[Any] =VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion") pipe.to(__lowerCAmelCase) pipe.set_progress_bar_config(disable=__lowerCAmelCase) lowerCamelCase__: str =load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg") lowerCamelCase__: int =torch.manual_seed(0) lowerCamelCase__: Dict =pipe( image=__lowerCAmelCase , generator=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images lowerCamelCase__: List[str] =image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) lowerCamelCase__: List[Any] =np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
10
"""simple docstring""" from __future__ import annotations def snake_case_ ( A_ : str ): '''simple docstring''' return [ord(A_ ) - 96 for elem in plain] def snake_case_ ( A_ : list[int] ): '''simple docstring''' return "".join(chr(elem + 96 ) for elem in encoded ) def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Dict = encode(input('''-> ''' ).strip().lower() ) print('''Encoded: ''', A_ ) print('''Decoded:''', decode(A_ ) ) if __name__ == "__main__": main()
72
0
import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class __lowerCAmelCase ( _lowercase ): """simple docstring""" snake_case_ = (CMStochasticIterativeScheduler,) snake_case_ = 10 def lowercase_ ( self , **lowerCamelCase__ ) -> Tuple: '''simple docstring''' __lowerCamelCase = { '''num_train_timesteps''': 201, '''sigma_min''': 0.0_02, '''sigma_max''': 80.0, } config.update(**__lowerCAmelCase ) return config def lowercase_ ( self ) -> int: '''simple docstring''' __lowerCamelCase = 10 __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = self.scheduler_classes[0](**__lowerCAmelCase ) scheduler.set_timesteps(__lowerCAmelCase ) __lowerCamelCase = scheduler.timesteps[0] __lowerCamelCase = scheduler.timesteps[1] __lowerCamelCase = self.dummy_sample __lowerCamelCase = 0.1 * sample __lowerCamelCase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample __lowerCamelCase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowercase_ ( self ) -> Any: '''simple docstring''' for timesteps in [10, 50, 100, 1_000]: self.check_over_configs(num_train_timesteps=__lowerCAmelCase ) def lowercase_ ( self ) -> int: '''simple docstring''' for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=__lowerCAmelCase ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**__lowerCAmelCase ) __lowerCamelCase = 1 scheduler.set_timesteps(__lowerCAmelCase ) __lowerCamelCase = scheduler.timesteps __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = self.dummy_model() __lowerCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(__lowerCAmelCase ): # 1. 
scale model input __lowerCamelCase = scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase ) # 2. predict noise residual __lowerCamelCase = model(__lowerCAmelCase , __lowerCAmelCase ) # 3. predict previous sample x_t-1 __lowerCamelCase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample __lowerCamelCase = pred_prev_sample __lowerCamelCase = torch.sum(torch.abs(__lowerCAmelCase ) ) __lowerCamelCase = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_sum.item() - 192.7_614 ) < 1e-2 assert abs(result_mean.item() - 0.25_10 ) < 1e-3 def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**__lowerCAmelCase ) __lowerCamelCase = [106, 0] scheduler.set_timesteps(timesteps=__lowerCAmelCase ) __lowerCamelCase = scheduler.timesteps __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = self.dummy_model() __lowerCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input __lowerCamelCase = scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase ) # 2. predict noise residual __lowerCamelCase = model(__lowerCAmelCase , __lowerCAmelCase ) # 3. 
predict previous sample x_t-1 __lowerCamelCase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample __lowerCamelCase = pred_prev_sample __lowerCamelCase = torch.sum(torch.abs(__lowerCAmelCase ) ) __lowerCamelCase = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_sum.item() - 347.6_357 ) < 1e-2 assert abs(result_mean.item() - 0.45_27 ) < 1e-3 def lowercase_ ( self ) -> Tuple: '''simple docstring''' __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**__lowerCAmelCase ) __lowerCamelCase = [39, 30, 12, 15, 0] with self.assertRaises(__lowerCAmelCase , msg='`timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=__lowerCAmelCase ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**__lowerCAmelCase ) __lowerCamelCase = [39, 30, 12, 1, 0] __lowerCamelCase = len(__lowerCAmelCase ) with self.assertRaises(__lowerCAmelCase , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ): scheduler.set_timesteps(num_inference_steps=__lowerCAmelCase , timesteps=__lowerCAmelCase ) def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**__lowerCAmelCase ) __lowerCamelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( __lowerCAmelCase , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=__lowerCAmelCase )
90
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = { '''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''], '''tokenization_lxmert''': ['''LxmertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''LxmertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''LxmertEncoder''', '''LxmertForPreTraining''', '''LxmertForQuestionAnswering''', '''LxmertModel''', '''LxmertPreTrainedModel''', '''LxmertVisualFeatureEncoder''', '''LxmertXLayer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLxmertForPreTraining''', '''TFLxmertMainLayer''', '''TFLxmertModel''', '''TFLxmertPreTrainedModel''', '''TFLxmertVisualFeatureEncoder''', ] if TYPE_CHECKING: from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_lxmert_fast import LxmertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_lxmert import ( 
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
0
import numpy as np import datasets a__: Dict = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n' a__: Union[str, Any] = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n' a__: int = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): def UpperCamelCase ( self ): return datasets.MetricInfo( description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features( { '''X''': datasets.Sequence(datasets.Value('''float''',id='''sequence''' ),id='''X''' ), } ),) def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ): A__ = np.array(__lowerCAmelCase ) A__ = np.array(__lowerCAmelCase ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError('''Expected `X` to be a 2D vector''' ) if len(reference_distribution.shape ) != 2: raise 
ValueError('''Expected `reference_distribution` to be a 2D vector''' ) if reference_distribution.shape[0] < 2: raise ValueError( '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' ) # Get mahalanobis distance for each prediction A__ = X - np.mean(__lowerCAmelCase ) A__ = np.cov(reference_distribution.T ) try: A__ = np.linalg.inv(__lowerCAmelCase ) except np.linalg.LinAlgError: A__ = np.linalg.pinv(__lowerCAmelCase ) A__ = np.dot(__lowerCAmelCase,__lowerCAmelCase ) A__ = np.dot(__lowerCAmelCase,X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
193
"""simple docstring""" def snake_case_ ( A_ : list[list[float]] ): '''simple docstring''' _lowerCamelCase : list[list[float]] = [] for data in source_data: for i, el in enumerate(A_ ): if len(A_ ) < i + 1: data_lists.append([] ) data_lists[i].append(float(A_ ) ) return data_lists def snake_case_ ( A_ : list[list[float]], A_ : list[int] ): '''simple docstring''' _lowerCamelCase : list[list[float]] = [] for dlist, weight in zip(A_, A_ ): _lowerCamelCase : Any = min(A_ ) _lowerCamelCase : Optional[Any] = max(A_ ) _lowerCamelCase : list[float] = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: _lowerCamelCase : str = F'''Invalid weight of {weight:f} provided''' raise ValueError(A_ ) score_lists.append(A_ ) return score_lists def snake_case_ ( A_ : list[list[float]] ): '''simple docstring''' _lowerCamelCase : list[float] = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(A_ ): _lowerCamelCase : List[str] = final_scores[j] + ele return final_scores def snake_case_ ( A_ : list[list[float]], A_ : list[int] ): '''simple docstring''' _lowerCamelCase : Tuple = get_data(A_ ) _lowerCamelCase : Optional[Any] = calculate_each_score(A_, A_ ) _lowerCamelCase : str = generate_final_scores(A_ ) # append scores to source data for i, ele in enumerate(A_ ): source_data[i].append(A_ ) return source_data
72
0
# NOTE(review): this XLM test module appears machine-obfuscated — every method
# parameter is literally named `UpperCAmelCase` (duplicate argument names are a
# SyntaxError), method bodies reference the *original* names (`parent`,
# `batch_size`, `result`, ...), all methods share the name `UpperCAmelCase`
# (so unittest would discover no `test_*` methods), and the `typing` names used
# in annotations are never imported.  Code is reproduced unchanged — comments
# only — so the intent is documented without guessing at a reconstruction.
import unittest

from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        XLMForMultipleChoice,
        XLMForQuestionAnswering,
        XLMForQuestionAnsweringSimple,
        XLMForSequenceClassification,
        XLMForTokenClassification,
        XLMModel,
        XLMWithLMHeadModel,
    )
    from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST


class A_:
    # Model-tester helper: builds a tiny XLM config plus synthetic inputs for
    # the create_and_check_* style methods below (presumably `XLMModelTester`
    # upstream — TODO confirm against transformers' test_modeling_xlm.py).
    def __init__(
        self: int,
        UpperCAmelCase: Optional[Any],          # parent test case
        UpperCAmelCase: str = 1_3,              # batch_size — TODO confirm mapping
        UpperCAmelCase: Any = 7,                # seq_length
        UpperCAmelCase: Optional[Any] = True,   # is_training
        UpperCAmelCase: Optional[int] = True,   # use_input_lengths
        UpperCAmelCase: Optional[Any] = True,   # use_token_type_ids
        UpperCAmelCase: Any = True,             # use_labels
        UpperCAmelCase: int = True,             # gelu_activation
        UpperCAmelCase: Optional[int] = False,  # sinusoidal_embeddings
        UpperCAmelCase: Any = False,            # causal
        UpperCAmelCase: Optional[int] = False,  # asm
        UpperCAmelCase: List[str] = 2,          # n_langs
        UpperCAmelCase: List[Any] = 9_9,        # vocab_size
        UpperCAmelCase: str = 0,                # n_special
        UpperCAmelCase: Any = 3_2,              # hidden_size
        UpperCAmelCase: List[Any] = 5,          # num_hidden_layers
        UpperCAmelCase: Dict = 4,               # num_attention_heads
        UpperCAmelCase: Optional[int] = 0.1,    # hidden_dropout_prob
        UpperCAmelCase: Any = 0.1,              # attention_probs_dropout_prob
        UpperCAmelCase: List[str] = 5_1_2,      # max_position_embeddings
        UpperCAmelCase: List[Any] = 2,          # type_sequence_label_size
        UpperCAmelCase: Optional[int] = 0.02,   # initializer_range
        UpperCAmelCase: Union[str, Any] = 2,    # num_labels
        UpperCAmelCase: Any = 4,                # num_choices
        UpperCAmelCase: List[Any] = "last",     # summary_type
        UpperCAmelCase: List[str] = True,       # use_proj
        UpperCAmelCase: List[Any] = None,       # scope
        UpperCAmelCase: str = 0,                # bos_token_id
    ) -> List[str]:
        # Bodies reference the pre-obfuscation parameter names; the
        # `__lowerCAmelCase` targets were originally `self.<name>` assignments.
        __lowerCAmelCase: Optional[Any] = parent
        __lowerCAmelCase: Any = batch_size
        __lowerCAmelCase: Any = seq_length
        __lowerCAmelCase: Tuple = is_training
        __lowerCAmelCase: Tuple = use_input_lengths
        __lowerCAmelCase: Optional[int] = use_token_type_ids
        __lowerCAmelCase: Dict = use_labels
        __lowerCAmelCase: Optional[Any] = gelu_activation
        __lowerCAmelCase: List[Any] = sinusoidal_embeddings
        __lowerCAmelCase: List[Any] = causal
        __lowerCAmelCase: Any = asm
        __lowerCAmelCase: int = n_langs
        __lowerCAmelCase: Tuple = vocab_size
        __lowerCAmelCase: str = n_special
        __lowerCAmelCase: List[Any] = hidden_size
        __lowerCAmelCase: List[str] = num_hidden_layers
        __lowerCAmelCase: int = num_attention_heads
        __lowerCAmelCase: int = hidden_dropout_prob
        __lowerCAmelCase: List[str] = attention_probs_dropout_prob
        __lowerCAmelCase: Dict = max_position_embeddings
        __lowerCAmelCase: Tuple = type_sequence_label_size
        __lowerCAmelCase: Any = initializer_range
        __lowerCAmelCase: Optional[Any] = num_labels
        __lowerCAmelCase: Dict = num_choices
        __lowerCAmelCase: Tuple = summary_type
        __lowerCAmelCase: List[Any] = use_proj
        __lowerCAmelCase: Tuple = scope
        __lowerCAmelCase: Union[str, Any] = bos_token_id

    # prepare_config_and_inputs: random ids/masks/labels sized by the fields above.
    def UpperCAmelCase(self: Union[str, Any]) -> Any:
        __lowerCAmelCase: Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        __lowerCAmelCase: str = random_attention_mask([self.batch_size, self.seq_length])
        __lowerCAmelCase: Dict = None
        if self.use_input_lengths:
            __lowerCAmelCase: Tuple = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        __lowerCAmelCase: Any = None
        if self.use_token_type_ids:
            __lowerCAmelCase: List[Any] = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        __lowerCAmelCase: Union[str, Any] = None
        __lowerCAmelCase: List[Any] = None
        __lowerCAmelCase: Optional[int] = None
        if self.use_labels:
            __lowerCAmelCase: Tuple = ids_tensor([self.batch_size], self.type_sequence_label_size)
            __lowerCAmelCase: List[str] = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            __lowerCAmelCase: Any = ids_tensor([self.batch_size], 2).float()
            __lowerCAmelCase: str = ids_tensor([self.batch_size], self.num_choices)
        __lowerCAmelCase: Dict = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    # get_config: tiny XLMConfig mirroring the tester fields.
    def UpperCAmelCase(self: Dict) -> Tuple:
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    # create_and_check_xlm_model: forward pass, check last_hidden_state shape.
    def UpperCAmelCase(
        self: int,
        UpperCAmelCase: Union[str, Any],
        UpperCAmelCase: int,
        UpperCAmelCase: Union[str, Any],
        UpperCAmelCase: List[str],
        UpperCAmelCase: Optional[int],
        UpperCAmelCase: Dict,
        UpperCAmelCase: Any,
        UpperCAmelCase: int,
        UpperCAmelCase: str,
    ) -> Union[str, Any]:
        __lowerCAmelCase: List[Any] = XLMModel(config=__lowerCAmelCase)
        model.to(__lowerCAmelCase)
        model.eval()
        __lowerCAmelCase: Optional[Any] = model(__lowerCAmelCase, lengths=__lowerCAmelCase, langs=__lowerCAmelCase)
        __lowerCAmelCase: List[Any] = model(__lowerCAmelCase, langs=__lowerCAmelCase)
        __lowerCAmelCase: List[Any] = model(__lowerCAmelCase)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    # create_and_check_xlm_lm_head: LM head loss/logits shapes.
    def UpperCAmelCase(
        self: Any,
        UpperCAmelCase: Optional[Any],
        UpperCAmelCase: List[Any],
        UpperCAmelCase: List[Any],
        UpperCAmelCase: int,
        UpperCAmelCase: Optional[int],
        UpperCAmelCase: List[str],
        UpperCAmelCase: Optional[int],
        UpperCAmelCase: List[str],
        UpperCAmelCase: str,
    ) -> str:
        __lowerCAmelCase: str = XLMWithLMHeadModel(__lowerCAmelCase)
        model.to(__lowerCAmelCase)
        model.eval()
        __lowerCAmelCase: List[str] = model(__lowerCAmelCase, token_type_ids=__lowerCAmelCase, labels=__lowerCAmelCase)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    # create_and_check_xlm_simple_qa: start/end logits shapes.
    def UpperCAmelCase(
        self: Optional[int],
        UpperCAmelCase: Union[str, Any],
        UpperCAmelCase: Tuple,
        UpperCAmelCase: List[Any],
        UpperCAmelCase: Any,
        UpperCAmelCase: Optional[Any],
        UpperCAmelCase: Dict,
        UpperCAmelCase: Optional[Any],
        UpperCAmelCase: Optional[int],
        UpperCAmelCase: Tuple,
    ) -> Tuple:
        __lowerCAmelCase: Any = XLMForQuestionAnsweringSimple(__lowerCAmelCase)
        model.to(__lowerCAmelCase)
        model.eval()
        __lowerCAmelCase: List[str] = model(__lowerCAmelCase)
        __lowerCAmelCase: Optional[Any] = model(__lowerCAmelCase, start_positions=__lowerCAmelCase, end_positions=__lowerCAmelCase)
        __lowerCAmelCase: List[str] = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    # create_and_check_xlm_qa: beam-search QA head with/without labels.
    def UpperCAmelCase(
        self: Union[str, Any],
        UpperCAmelCase: int,
        UpperCAmelCase: Union[str, Any],
        UpperCAmelCase: Tuple,
        UpperCAmelCase: Any,
        UpperCAmelCase: Union[str, Any],
        UpperCAmelCase: Tuple,
        UpperCAmelCase: Tuple,
        UpperCAmelCase: List[Any],
        UpperCAmelCase: Any,
    ) -> List[Any]:
        __lowerCAmelCase: Optional[Any] = XLMForQuestionAnswering(__lowerCAmelCase)
        model.to(__lowerCAmelCase)
        model.eval()
        __lowerCAmelCase: Optional[Any] = model(__lowerCAmelCase)
        __lowerCAmelCase: Dict = model(
            __lowerCAmelCase,
            start_positions=__lowerCAmelCase,
            end_positions=__lowerCAmelCase,
            cls_index=__lowerCAmelCase,
            is_impossible=__lowerCAmelCase,
            p_mask=__lowerCAmelCase,
        )
        __lowerCAmelCase: Dict = model(
            __lowerCAmelCase,
            start_positions=__lowerCAmelCase,
            end_positions=__lowerCAmelCase,
            cls_index=__lowerCAmelCase,
            is_impossible=__lowerCAmelCase,
        )
        (__lowerCAmelCase): List[str] = result_with_labels.to_tuple()
        __lowerCAmelCase: str = model(__lowerCAmelCase, start_positions=__lowerCAmelCase, end_positions=__lowerCAmelCase)
        (__lowerCAmelCase): Optional[int] = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    # create_and_check_xlm_sequence_classif.
    def UpperCAmelCase(
        self: Union[str, Any],
        UpperCAmelCase: Dict,
        UpperCAmelCase: Union[str, Any],
        UpperCAmelCase: Optional[int],
        UpperCAmelCase: Dict,
        UpperCAmelCase: int,
        UpperCAmelCase: str,
        UpperCAmelCase: List[str],
        UpperCAmelCase: str,
        UpperCAmelCase: Union[str, Any],
    ) -> Dict:
        __lowerCAmelCase: List[str] = XLMForSequenceClassification(__lowerCAmelCase)
        model.to(__lowerCAmelCase)
        model.eval()
        __lowerCAmelCase: Dict = model(__lowerCAmelCase)
        __lowerCAmelCase: str = model(__lowerCAmelCase, labels=__lowerCAmelCase)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    # create_and_check_xlm_token_classif.
    def UpperCAmelCase(
        self: str,
        UpperCAmelCase: List[Any],
        UpperCAmelCase: Dict,
        UpperCAmelCase: Optional[Any],
        UpperCAmelCase: Dict,
        UpperCAmelCase: Optional[Any],
        UpperCAmelCase: Tuple,
        UpperCAmelCase: Optional[Any],
        UpperCAmelCase: List[str],
        UpperCAmelCase: Dict,
    ) -> List[str]:
        __lowerCAmelCase: str = self.num_labels
        __lowerCAmelCase: int = XLMForTokenClassification(__lowerCAmelCase)
        model.to(__lowerCAmelCase)
        model.eval()
        __lowerCAmelCase: str = model(__lowerCAmelCase, attention_mask=__lowerCAmelCase, labels=__lowerCAmelCase)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    # create_and_check_xlm_for_multiple_choice: expand inputs across choices.
    def UpperCAmelCase(
        self: Dict,
        UpperCAmelCase: Optional[Any],
        UpperCAmelCase: Dict,
        UpperCAmelCase: Optional[int],
        UpperCAmelCase: Union[str, Any],
        UpperCAmelCase: List[str],
        UpperCAmelCase: Optional[int],
        UpperCAmelCase: Dict,
        UpperCAmelCase: Union[str, Any],
        UpperCAmelCase: Any,
    ) -> Optional[int]:
        __lowerCAmelCase: Union[str, Any] = self.num_choices
        __lowerCAmelCase: List[str] = XLMForMultipleChoice(config=__lowerCAmelCase)
        model.to(__lowerCAmelCase)
        model.eval()
        __lowerCAmelCase: Union[str, Any] = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        __lowerCAmelCase: List[Any] = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        __lowerCAmelCase: Dict = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        __lowerCAmelCase: Tuple = model(
            __lowerCAmelCase,
            attention_mask=__lowerCAmelCase,
            token_type_ids=__lowerCAmelCase,
            labels=__lowerCAmelCase,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    # prepare_config_and_inputs_for_common: dict form used by the common mixin.
    def UpperCAmelCase(self: Optional[int]) -> int:
        __lowerCAmelCase: Union[str, Any] = self.prepare_config_and_inputs()
        (__lowerCAmelCase): Optional[Any] = config_and_inputs
        __lowerCAmelCase: Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict


@require_torch
class A_(_lowercase, _lowercase, _lowercase, unittest.TestCase):
    # Mixin test case — the `_lowercase` bases were presumably ModelTesterMixin,
    # GenerationTesterMixin, PipelineTesterMixin (TODO confirm); the class
    # attributes below were `all_model_classes`, `all_generative_model_classes`
    # and `pipeline_model_mapping` before obfuscation.
    _lowercase: Dict = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    _lowercase: Tuple = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    _lowercase: Tuple = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    # is_pipeline_test_to_skip.
    def UpperCAmelCase(
        self: Optional[int],
        UpperCAmelCase: int,
        UpperCAmelCase: int,
        UpperCAmelCase: Tuple,
        UpperCAmelCase: int,
        UpperCAmelCase: int,
    ) -> Any:
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('Fast')
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    # _prepare_for_class: adds dummy cls_index / p_mask labels for the QA model.
    def UpperCAmelCase(self: Any, UpperCAmelCase: Optional[int], UpperCAmelCase: int, UpperCAmelCase: str = False) -> Tuple:
        __lowerCAmelCase: int = super()._prepare_for_class(__lowerCAmelCase, __lowerCAmelCase, return_labels=__lowerCAmelCase)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                __lowerCAmelCase: Dict = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=__lowerCAmelCase
                )
                __lowerCAmelCase: Any = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=__lowerCAmelCase
                )
        return inputs_dict

    # setUp.
    def UpperCAmelCase(self: Union[str, Any]) -> Optional[int]:
        __lowerCAmelCase: List[Any] = XLMModelTester(self)
        __lowerCAmelCase: List[Any] = ConfigTester(self, config_class=__lowerCAmelCase, emb_dim=3_7)

    # test_config.
    def UpperCAmelCase(self: Any) -> str:
        self.config_tester.run_common_tests()

    # test_xlm_model.
    def UpperCAmelCase(self: Optional[Any]) -> Tuple:
        __lowerCAmelCase: str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*__lowerCAmelCase)

    # test_xlm_lm_head.
    def UpperCAmelCase(self: Optional[int]) -> Any:
        __lowerCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*__lowerCAmelCase)

    # test_xlm_simple_qa.
    def UpperCAmelCase(self: Optional[Any]) -> List[str]:
        __lowerCAmelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*__lowerCAmelCase)

    # test_xlm_qa.
    def UpperCAmelCase(self: Any) -> Union[str, Any]:
        __lowerCAmelCase: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*__lowerCAmelCase)

    # test_xlm_sequence_classif.
    def UpperCAmelCase(self: List[str]) -> Optional[Any]:
        __lowerCAmelCase: Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*__lowerCAmelCase)

    # test_xlm_token_classif.
    def UpperCAmelCase(self: Any) -> Union[str, Any]:
        __lowerCAmelCase: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*__lowerCAmelCase)

    # test_xlm_for_multiple_choice.
    def UpperCAmelCase(self: Dict) -> Optional[Any]:
        __lowerCAmelCase: int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*__lowerCAmelCase)

    # _check_attentions_for_generate: one attention tuple per generated token.
    def UpperCAmelCase(
        self: Dict,
        UpperCAmelCase: Optional[Any],
        UpperCAmelCase: Optional[int],
        UpperCAmelCase: Optional[int],
        UpperCAmelCase: str,
        UpperCAmelCase: str,
        UpperCAmelCase: Optional[Any] = False,
        UpperCAmelCase: Tuple = 1,
    ) -> Dict:
        self.assertIsInstance(__lowerCAmelCase, __lowerCAmelCase)
        self.assertListEqual(
            [isinstance(__lowerCAmelCase, __lowerCAmelCase) for iter_attentions in attentions], [True] * len(__lowerCAmelCase)
        )
        self.assertEqual(len(__lowerCAmelCase), (max_length - min_length) * num_beam_groups)
        for idx, iter_attentions in enumerate(__lowerCAmelCase):
            # adds PAD dummy token
            __lowerCAmelCase: Optional[Any] = min_length + idx + 1
            __lowerCAmelCase: Optional[Any] = min_length + idx + 1
            __lowerCAmelCase: Dict = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(__lowerCAmelCase)
            )

    # _check_hidden_states_for_generate: one hidden-states tuple per token.
    def UpperCAmelCase(
        self: str,
        UpperCAmelCase: int,
        UpperCAmelCase: Optional[int],
        UpperCAmelCase: Tuple,
        UpperCAmelCase: Any,
        UpperCAmelCase: str,
        UpperCAmelCase: str = False,
        UpperCAmelCase: Tuple = 1,
    ) -> int:
        self.assertIsInstance(__lowerCAmelCase, __lowerCAmelCase)
        self.assertListEqual(
            [isinstance(__lowerCAmelCase, __lowerCAmelCase) for iter_hidden_states in hidden_states],
            [True] * len(__lowerCAmelCase),
        )
        self.assertEqual(len(__lowerCAmelCase), (max_length - min_length) * num_beam_groups)
        for idx, iter_hidden_states in enumerate(__lowerCAmelCase):
            # adds PAD dummy token
            __lowerCAmelCase: str = min_length + idx + 1
            __lowerCAmelCase: int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(__lowerCAmelCase),
            )
        pass

    # test_model_from_pretrained.
    @slow
    def UpperCAmelCase(self: int) -> Union[str, Any]:
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCAmelCase: Dict = XLMModel.from_pretrained(__lowerCAmelCase)
            self.assertIsNotNone(__lowerCAmelCase)


@require_torch
class A_(unittest.TestCase):
    # Integration test: greedy generation from the pretrained en-2048 checkpoint.
    @slow
    def UpperCAmelCase(self: Dict) -> str:
        __lowerCAmelCase: Any = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
        model.to(__lowerCAmelCase)
        __lowerCAmelCase: Tuple = torch.tensor([[1_4, 4_4_7]], dtype=torch.long, device=__lowerCAmelCase)  # the president
        __lowerCAmelCase: List[Any] = [
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
        ]  # "the president" repeated ten times
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        __lowerCAmelCase: List[Any] = model.generate(__lowerCAmelCase, do_sample=__lowerCAmelCase)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), __lowerCAmelCase)
322
"""UniSpeech model configuration.

NOTE(review): the original file was machine-obfuscated — every ``__init__``
parameter was named ``__lowerCAmelCase`` (duplicate argument names are a
SyntaxError), ``self.X =`` targets were replaced by ``_lowerCamelCase``, the
base class ``_lowercase`` was undefined, and the logger and archive map shared
one module-level name (the map clobbered the logger).  Names below are
restored from the surviving right-hand sides of the body assignments and the
default values, which match the upstream ``UniSpeechConfig`` order.
"""
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/unispeech-large-1500h-cv''': (
        '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    """Configuration class holding every hyper-parameter of a UniSpeech model."""

    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=3_2,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(1_0, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=1_2_8,
        num_conv_pos_embedding_groups=1_6,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=1_0,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=1_0,
        mask_feature_min_masks=0,
        num_codevectors_per_group=3_2_0,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=1_0_0,
        codevector_dim=2_5_6,
        proj_codevector_dim=2_5_6,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=2_5_6,
        num_ctc_classes=8_0,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # convolutional feature-extractor geometry (one entry per conv layer)
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # the three conv lists must describe the same number of layers
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.'''
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        """Overall stride of the conv feature extractor (product of strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
72
0
"""Convert local DialoGPT ``*_ft.pkl`` checkpoints to the HF weights layout.

The fine-tuned DialoGPT state dicts store the LM head under
``lm_head.decoder.weight``; transformers expects ``lm_head.weight``.

NOTE(review): the obfuscated original popped the old key into a throwaway
variable (the weight was silently dropped) and the ``__main__`` block called
an undefined name; both are fixed below.
"""
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ['''small''', '''medium''', '''large''']

OLD_KEY = '''lm_head.decoder.weight'''
NEW_KEY = '''lm_head.weight'''


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    """Rename the LM-head key and save the state dict as ``WEIGHTS_NAME``.

    Args:
        checkpoint_path: path to the original ``*_ft.pkl`` state dict.
        pytorch_dump_folder_path: output directory (created if missing).
    """
    d = torch.load(checkpoint_path)
    # BUG FIX: re-insert the popped tensor under the key transformers expects;
    # the original discarded it.
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, F'''{MODEL}_ft.pkl''')
        pytorch_dump_folder_path = F'''./DialoGPT-{MODEL}'''
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
106
"""Version-tolerant wrapper around ``huggingface_hub.hf_hub_url`` for datasets.

NOTE(review): the obfuscated original gave all three parameters the same name
(``A_``), which is a SyntaxError; parameter names below are restored from the
body's usage (repo id, file path, optional revision).
"""
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    """Return the dataset-repo URL for ``path`` at ``revision``.

    Args:
        repo_id: dataset repository id on the Hub.
        path: file path inside the repository.
        revision: branch/tag/commit; defaults to the repo's default branch.
    """
    if version.parse(hfh.__version__).release < version.parse('''0.11.0''').release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type='''dataset''', revision=revision)
72
0
"""Spiral-diagonal primes (Project Euler 58 style).

Find the first odd square-spiral side length for which the ratio of primes
along both diagonals falls below ``ratio``.

NOTE(review): the obfuscated original named both functions identically (the
second shadowed the first) and called undefined names (``is_prime``, ``A_``);
working names are restored below.
"""
import math


def is_prime(number: int) -> bool:
    """Trial-division primality test using the 6k +/- 1 optimisation."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Return the side length whose diagonal prime ratio first drops below ``ratio``.

    Starts from the 3x3 layer (3 primes on 5 diagonal values) and adds one
    spiral layer per iteration; each layer contributes three new corner
    candidates (the fourth corner is the odd square, never prime).
    """
    j = 3        # current odd side length
    primes = 3   # primes found on the diagonals so far (3, 5, 7)
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
"""simple docstring""" import unittest import numpy as np def snake_case_ ( A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray | None = None, ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = np.shape(A_ ) _lowerCamelCase : List[str] = np.shape(A_ ) _lowerCamelCase : List[str] = np.shape(A_ ) if shape_a[0] != shape_b[0]: _lowerCamelCase : Tuple = ( '''Expected the same number of rows for A and B. ''' F'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(A_ ) if shape_b[1] != shape_c[1]: _lowerCamelCase : Tuple = ( '''Expected the same number of columns for B and C. ''' F'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(A_ ) _lowerCamelCase : List[str] = pseudo_inv if a_inv is None: try: _lowerCamelCase : Any = np.linalg.inv(A_ ) except np.linalg.LinAlgError: raise ValueError( '''Input matrix A is not invertible. Cannot compute Schur complement.''' ) return mat_c - mat_b.T @ a_inv @ mat_b class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : List[str] = np.array([[2, 1], [6, 3]] ) _lowerCamelCase : List[Any] = schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Dict = np.block([[a, b], [b.T, c]] ) _lowerCamelCase : Tuple = np.linalg.det(__lowerCAmelCase ) _lowerCamelCase : List[str] = np.linalg.det(__lowerCAmelCase ) _lowerCamelCase : Any = np.linalg.det(__lowerCAmelCase ) self.assertAlmostEqual(__lowerCAmelCase , det_a * det_s ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : int = np.array([[2, 1], [6, 3]] ) with 
self.assertRaises(__lowerCAmelCase ): schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] ) _lowerCamelCase : Union[str, Any] = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(__lowerCAmelCase ): schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
72
0
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]: """simple docstring""" return [ txt[:a] + txt[a].upper() + txt[a + 1 :] for a in range(len(A_ ) ) if txt[a].isalpha() ] if __name__ == "__main__": __import__("""doctest""").testmod()
14
"""simple docstring""" def snake_case_ ( A_ : list[int], A_ : str ): '''simple docstring''' _lowerCamelCase : Tuple = int(A_ ) # Initialize Result _lowerCamelCase : Dict = [] # Traverse through all denomination for denomination in reversed(A_ ): # Find denominations while int(A_ ) >= int(A_ ): total_value -= int(A_ ) answer.append(A_ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": lowerCAmelCase__ = [] lowerCAmelCase__ = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): lowerCAmelCase__ = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(F"""Denomination {i}: """).strip())) lowerCAmelCase__ = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter lowerCAmelCase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2000] lowerCAmelCase__ = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(F"""Following is minimal change for {value}: """) lowerCAmelCase__ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
72
0
"""Fast (Rust-backed) tokenizer class for SqueezeBERT."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

# NOTE(review): in the obfuscated source every constant below was assigned to
# the same name, so only the last survived and the class attributes raised
# NameError. Canonical names restored.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    """
    "Fast" SqueezeBERT tokenizer backed by HuggingFace *tokenizers*
    (WordPiece). Mirrors the BERT fast tokenizer behaviour.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its serialized options disagree
        # with the arguments passed here (e.g. a checkpoint saved with a
        # different casing behaviour).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` (plus ``B [SEP]`` when a pair is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Zeros over the first segment (incl. specials), ones over the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Serialize the backend vocabulary files into ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)


# Backward-compatible alias for the previous (obfuscated) class name.
lowerCAmelCase = SqueezeBertTokenizerFast
141
"""simple docstring""" def snake_case_ ( A_ : int = 2_00_00_00 ): '''simple docstring''' _lowerCamelCase : int = [0 for i in range(n + 1 )] _lowerCamelCase : List[str] = 1 _lowerCamelCase : Any = 1 for i in range(2, int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i, n + 1, A_ ): _lowerCamelCase : str = 1 _lowerCamelCase : Tuple = 0 for i in range(A_ ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(F"""{solution() = }""")
72
0
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase__ = { '''configuration_autoformer''': [ '''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AutoformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ '''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''AutoformerForPrediction''', '''AutoformerModel''', '''AutoformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
181
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def snake_case_ ( A_ : Any ): '''simple docstring''' _lowerCamelCase : Any = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(A_, A_ ) def snake_case_ ( A_ : Union[str, Any] ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : Tuple = emb.weight.shape _lowerCamelCase : Dict = nn.Linear(A_, A_, bias=A_ ) _lowerCamelCase : str = emb.weight.data return lin_layer def snake_case_ ( A_ : str, A_ : Optional[int]="facebook/mbart-large-en-ro", A_ : Union[str, Any]=False, A_ : List[str]=False ): '''simple docstring''' _lowerCamelCase : Tuple = torch.load(A_, map_location='''cpu''' )['''model'''] remove_ignore_keys_(A_ ) _lowerCamelCase : int = state_dict['''encoder.embed_tokens.weight'''].shape[0] _lowerCamelCase : Any = MBartConfig.from_pretrained(A_, vocab_size=A_ ) if mbart_aa and finetuned: _lowerCamelCase : Any = '''relu''' _lowerCamelCase : Optional[int] = state_dict['''decoder.embed_tokens.weight'''] _lowerCamelCase : Any = MBartForConditionalGeneration(A_ ) model.model.load_state_dict(A_ ) if finetuned: _lowerCamelCase : str = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model 
is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
72
0
'''simple docstring''' from ..utils import DummyObject, requires_backends class a_ ( metaclass=_lowercase ): __A = ["torch", "scipy"] def __init__( self : List[Any] , *lowercase : List[str] , **lowercase : str ): """simple docstring""" requires_backends(self , ["torch", "scipy"] ) @classmethod def lowercase__ ( cls : List[Any] , *lowercase : Any , **lowercase : Any ): """simple docstring""" requires_backends(cls , ["torch", "scipy"] ) @classmethod def lowercase__ ( cls : List[Any] , *lowercase : Dict , **lowercase : Dict ): """simple docstring""" requires_backends(cls , ["torch", "scipy"] )
223
"""simple docstring""" def snake_case_ ( A_ : list[list] ): '''simple docstring''' _lowerCamelCase : Optional[int] = current_set.copy() for row_index, row in enumerate(A_ ): _lowerCamelCase : Tuple = row[0] for column_index, column in enumerate(A_ ): if magnitude == 0: _lowerCamelCase : List[Any] = column continue _lowerCamelCase : List[Any] = column / magnitude # Subtract to cancel term _lowerCamelCase : Union[str, Any] = current_set[0] _lowerCamelCase : Dict = [first_row] _lowerCamelCase : str = current_set[1::] for row in current_set: _lowerCamelCase : Union[str, Any] = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(A_ ) continue for column_index in range(len(A_ ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(A_ ) # Create next recursion iteration set if len(final_set[0] ) != 3: _lowerCamelCase : Any = final_set[0] _lowerCamelCase : Any = [] _lowerCamelCase : Optional[int] = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) _lowerCamelCase : Dict = simplify(A_ ) for i in range(len(A_ ) ): resultant[i].insert(0, current_first_column[i] ) resultant.insert(0, A_ ) _lowerCamelCase : Tuple = resultant return final_set def snake_case_ ( A_ : list[list] ): '''simple docstring''' if len(A_ ) == 0: raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) _lowerCamelCase : Dict = len(A_ ) + 1 if any(len(A_ ) != _length for item in equations ): raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) for row in equations: if any(not isinstance(A_, (int, float) ) for column in row ): raise ValueError('''solve_simultaneous() requires lists of integers''' ) if len(A_ ) == 1: return [equations[0][-1] / equations[0][0]] _lowerCamelCase : Optional[Any] = equations.copy() if any(0 in row for row in data_set ): _lowerCamelCase : str = data_set.copy() _lowerCamelCase : List[Any] = [] for row_index, 
row in enumerate(A_ ): if 0 not in row: _lowerCamelCase : Union[str, Any] = data_set.pop(A_ ) break if not full_row: raise ValueError('''solve_simultaneous() requires at least 1 full equation''' ) data_set.insert(0, A_ ) _lowerCamelCase : List[str] = data_set.copy() _lowerCamelCase : int = simplify(A_ ) _lowerCamelCase : int = simplified[::-1] _lowerCamelCase : list = [] for row in simplified: _lowerCamelCase : Tuple = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue _lowerCamelCase : Optional[Any] = row.copy()[: len(A_ ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(A_ ) == 0: solutions.append(0 ) continue _lowerCamelCase : Tuple = temp_row[1::] _lowerCamelCase : Tuple = temp_row[::-1] for column_index, column in enumerate(A_ ): current_solution -= column * solutions[column_index] solutions.append(A_ ) _lowerCamelCase : Optional[int] = [] for item in solutions: final.append(float(round(A_, 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase__ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
72
0
"""simple docstring""" import argparse import os import transformers from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from .utils import logging logging.set_verbosity_info() lowerCamelCase__ : List[str] = logging.get_logger(__name__) lowerCamelCase__ : Dict = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS} def UpperCamelCase ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Dict, _lowerCAmelCase : Any, _lowerCAmelCase : Optional[int] ) -> Optional[int]: if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: raise ValueError(f'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' ) if tokenizer_name is None: _UpperCAmelCase : List[str] = TOKENIZER_CLASSES else: _UpperCAmelCase : List[str] = {tokenizer_name: getattr(A_, tokenizer_name + """Fast""" )} logger.info(f'''Loading tokenizer classes: {tokenizer_names}''' ) for tokenizer_name in tokenizer_names: _UpperCAmelCase : Optional[int] = TOKENIZER_CLASSES[tokenizer_name] _UpperCAmelCase : List[str] = True if checkpoint_name is None: _UpperCAmelCase : int = list(tokenizer_class.max_model_input_sizes.keys() ) else: _UpperCAmelCase : List[str] = [checkpoint_name] logger.info(f'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' ) for checkpoint in checkpoint_names: logger.info(f'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' ) # Load tokenizer _UpperCAmelCase : int = tokenizer_class.from_pretrained(A_, force_download=A_ ) # Save fast tokenizer logger.info(f'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' ) # For organization names we create sub-directories if "/" in checkpoint: _UpperCAmelCase : Union[str, Any] = checkpoint.split("""/""" ) _UpperCAmelCase : Dict = os.path.join(A_, A_ ) elif add_prefix: _UpperCAmelCase : List[Any] = checkpoint _UpperCAmelCase : str = dump_path else: _UpperCAmelCase : str = None 
_UpperCAmelCase : List[str] = dump_path logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' ) if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]: _UpperCAmelCase : int = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint] _UpperCAmelCase : Union[str, Any] = file_path.split(A_ )[-1][0] if next_char == "/": _UpperCAmelCase : Any = os.path.join(A_, A_ ) _UpperCAmelCase : Union[str, Any] = None logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' ) _UpperCAmelCase : Union[str, Any] = tokenizer.save_pretrained( A_, legacy_format=A_, filename_prefix=A_ ) logger.info(f'''=> File names {file_names}''' ) for file_name in file_names: if not file_name.endswith("""tokenizer.json""" ): os.remove(A_ ) logger.info(f'''=> removing {file_name}''' ) if __name__ == "__main__": lowerCamelCase__ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.''' ) parser.add_argument( '''--tokenizer_name''', default=None, type=str, help=( F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will ''' '''download and convert all the checkpoints from AWS.''' ), ) parser.add_argument( '''--checkpoint_name''', default=None, type=str, help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''', ) parser.add_argument( '''--force_download''', action='''store_true''', help='''Re-download checkpoints.''', ) lowerCamelCase__ : Dict = parser.parse_args() convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
246
"""simple docstring""" import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class __snake_case ( _lowercase): snake_case__ : List[Any] = "Speech2TextFeatureExtractor" snake_case__ : Union[str, Any] = "Speech2TextTokenizer" def __init__( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" super().__init__(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : List[str] = self.feature_extractor _lowerCamelCase : str = False def __call__( self : List[Any] , *__lowerCAmelCase : int , **__lowerCAmelCase : List[str] ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) _lowerCamelCase : str = kwargs.pop('''raw_speech''' ) else: _lowerCamelCase : Tuple = kwargs.pop('''audio''' , __lowerCAmelCase ) _lowerCamelCase : Optional[Any] = kwargs.pop('''sampling_rate''' , __lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = kwargs.pop('''text''' , __lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: _lowerCamelCase : List[Any] = args[0] _lowerCamelCase : int = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: _lowerCamelCase : List[Any] = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase ) if text is not None: _lowerCamelCase : List[Any] = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase ) if text is None: return inputs elif audio is None: return encodings else: _lowerCamelCase : List[str] = encodings['''input_ids'''] return inputs def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Tuple ): """simple docstring""" return 
self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : int ): """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @contextmanager def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) _lowerCamelCase : Union[str, Any] = True _lowerCamelCase : Any = self.tokenizer yield _lowerCamelCase : List[str] = self.feature_extractor _lowerCamelCase : Tuple = False
72
0
"""Convert classy-vision / vissl / timm RegNet checkpoints to the Transformers format."""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple

import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs

from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    """Runs a forward pass and records every executed leaf submodule, in order."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # Record only leaf modules (no children) plus convolutions/batch-norms.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        # Remove hooks so later forward passes are unaffected.
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # Keep only the modules that actually hold parameters/buffers.
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    """Copies weights from `src` to `dest` by zipping their traced, parametrized submodules."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass
        using `x` as input. Under the hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


class FakeRegNetVisslWrapper(nn.Module):
    """Pretends to be a vissl trunk so `get_trunk_forward_outputs` can be reused."""

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )


class NameToFromModelFuncMap(dict):
    """Maps a model name to a factory returning `(source_model, optional_head_state_dict)`.

    Defaults to timm when the name is not registered explicitly.
    """

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # If not registered (i.e. not a SEER checkpoint), fall back to timm.
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val


class NameToOurModelFuncMap(dict):
    """Maps a model name to the Transformers class to instantiate."""

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        # SEER backbones without an ImageNet head are plain RegNetModel.
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val


def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    """Copy the listed `(from_key, to_key)` tensors into `to_state_dict` and return it."""
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict


def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    """Convert one checkpoint, verify outputs match, and optionally push it to the Hub."""
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        # NOTE(review): the `raise_if_mismatch` value was lost in the source; False
        # matches the upstream conversion script — confirm against the original.
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )

    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head,
    # so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one or all known RegNet checkpoints into `save_directory`."""
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "regnet-x-002": ImageNetPreTrainedConfig(
            depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type="x"
        ),
        "regnet-x-004": ImageNetPreTrainedConfig(
            depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type="x"
        ),
        "regnet-x-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type="x"
        ),
        "regnet-x-008": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type="x"
        ),
        "regnet-x-016": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type="x"
        ),
        "regnet-x-032": ImageNetPreTrainedConfig(
            depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type="x"
        ),
        "regnet-x-040": ImageNetPreTrainedConfig(
            depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type="x"
        ),
        "regnet-x-064": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type="x"
        ),
        "regnet-x-080": ImageNetPreTrainedConfig(
            depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type="x"
        ),
        "regnet-x-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type="x"
        ),
        "regnet-x-160": ImageNetPreTrainedConfig(
            depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type="x"
        ),
        "regnet-x-320": ImageNetPreTrainedConfig(
            depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type="x"
        ),
        # y variant
        "regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8),
        "regnet-y-004": ImageNetPreTrainedConfig(
            depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8
        ),
        "regnet-y-006": ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16
        ),
        "regnet-y-008": ImageNetPreTrainedConfig(
            depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16
        ),
        "regnet-y-016": ImageNetPreTrainedConfig(
            depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24
        ),
        "regnet-y-032": ImageNetPreTrainedConfig(
            depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24
        ),
        "regnet-y-040": ImageNetPreTrainedConfig(
            depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64
        ),
        "regnet-y-064": ImageNetPreTrainedConfig(
            depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72
        ),
        "regnet-y-080": ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56
        ),
        "regnet-y-120": ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112
        ),
        "regnet-y-160": ImageNetPreTrainedConfig(
            depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112
        ),
        "regnet-y-320": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232
        ),
        # models created by SEER -> https://arxiv.org/abs/2202.08360
        "regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        "regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328),
        "regnet-y-1280-seer": RegNetConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264
        ),
        "regnet-y-2560-seer": RegNetConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640
        ),
        "regnet-y-10b-seer": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010
        ),
        # finetuned on imagenet
        "regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232
        ),
        "regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328
        ),
        "regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264
        ),
        "regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640
        ),
        "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010
        ),
    }

    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        # fixed: the source had `w_a` repeated (a SyntaxError); first occurrence is `w_0`.
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
            " currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
10
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''ReformerTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''ReformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ReformerAttention''', '''ReformerForMaskedLM''', '''ReformerForQuestionAnswering''', '''ReformerForSequenceClassification''', '''ReformerLayer''', '''ReformerModel''', '''ReformerModelWithLMHead''', '''ReformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = 
_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
0
"""Simple daily-traffic forecasting: three predictors vote on whether today's data is safe."""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX


def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """
    Predict the next user count with ordinary least squares on
    [1, date, match-count] features.
    """
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    # Normal equation: beta = (X^T X)^-1 X^T y
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    # NOTE(review): `test_mtch[0]` is added unweighted rather than multiplied by
    # beta[2] — looks suspicious but matches the original formula; confirm intent.
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Forecast the next user count with a seasonal ARIMA model (weekly season)."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Forecast the next user count with an RBF-kernel support vector regressor."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    """Return a lower safety bound derived from the IQR of the training data."""
    train_user.sort()
    q25 = np.percentile(train_user, 25)
    q75 = np.percentile(train_user, 75)
    iqr = q75 - q25
    low_lim = q25 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """
    Return True when a majority of the forecasts are within 0.1 of the actual
    result (votes above the actual result count as unsafe).
    """
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe


if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    # NOTE(review): `tst_user` is a one-element list, not a float — confirm whether
    # `tst_user[0]` was intended.
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    print(f"Today's data is {not_str}safe.")
90
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase__ = { '''configuration_autoformer''': [ '''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AutoformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''AutoformerForPrediction''', '''AutoformerModel''', '''AutoformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
72
0
"""Unit tests for the Flaubert model family."""
import os
import tempfile
import unittest

from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        FlaubertForMultipleChoice,
        FlaubertForQuestionAnswering,
        FlaubertForQuestionAnsweringSimple,
        FlaubertForSequenceClassification,
        FlaubertForTokenClassification,
        FlaubertModel,
        FlaubertWithLMHeadModel,
    )
    from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class FlaubertModelTester(object):
    """Builds tiny Flaubert configs/inputs and checks output shapes per head."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
193
"""simple docstring""" import math def snake_case_ ( A_ : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5, int(math.sqrt(A_ ) + 1 ), 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def snake_case_ ( A_ : float = 0.1 ): '''simple docstring''' _lowerCamelCase : Optional[int] = 3 _lowerCamelCase : List[str] = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1 ): primes += is_prime(A_ ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
72
0
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A_ ( _lowercase , unittest.TestCase ): _lowercase : int = GPTaTokenizer _lowercase : Dict = GPTaTokenizerFast _lowercase : Union[str, Any] = True _lowercase : Dict = {"add_prefix_space": True} _lowercase : List[Any] = False def UpperCAmelCase ( self : str ) -> str: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __lowerCAmelCase: Dict = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', '''<|endoftext|>''', ] __lowerCAmelCase: int = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) __lowerCAmelCase: Optional[int] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __lowerCAmelCase: List[str] = {'''unk_token''': '''<unk>'''} __lowerCAmelCase: Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) __lowerCAmelCase: List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(__lowerCAmelCase ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(__lowerCAmelCase ) ) def UpperCAmelCase ( self : Any , **UpperCAmelCase : str ) -> int: kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def UpperCAmelCase ( self : int , **UpperCAmelCase : Union[str, Any] ) -> List[Any]: kwargs.update(self.special_tokens_map ) return 
GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def UpperCAmelCase ( self : int , UpperCAmelCase : Tuple ) -> int: __lowerCAmelCase: Union[str, Any] = '''lower newer''' __lowerCAmelCase: List[str] = '''lower newer''' return input_text, output_text def UpperCAmelCase ( self : Optional[Any] ) -> List[Any]: __lowerCAmelCase: Optional[int] = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) __lowerCAmelCase: Any = '''lower newer''' __lowerCAmelCase: Tuple = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __lowerCAmelCase: Any = tokenizer.tokenize(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __lowerCAmelCase: Any = tokens + [tokenizer.unk_token] __lowerCAmelCase: Any = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase ) def UpperCAmelCase ( self : Optional[Any] ) -> Dict: if not self.test_rust_tokenizer: return __lowerCAmelCase: List[str] = self.get_tokenizer() __lowerCAmelCase: Optional[int] = self.get_rust_tokenizer(add_prefix_space=__lowerCAmelCase ) __lowerCAmelCase: List[str] = '''lower newer''' # Testing tokenization __lowerCAmelCase: Dict = tokenizer.tokenize(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase ) __lowerCAmelCase: Optional[Any] = rust_tokenizer.tokenize(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) # Testing conversion to ids without special tokens __lowerCAmelCase: Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase ) __lowerCAmelCase: List[str] = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) # Testing conversion to ids with special tokens __lowerCAmelCase: List[Any] = self.get_rust_tokenizer(add_prefix_space=__lowerCAmelCase ) 
__lowerCAmelCase: List[str] = tokenizer.encode(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase ) __lowerCAmelCase: Union[str, Any] = rust_tokenizer.encode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) # Testing the unknown token __lowerCAmelCase: Union[str, Any] = tokens + [rust_tokenizer.unk_token] __lowerCAmelCase: List[str] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase ) def UpperCAmelCase ( self : str , *UpperCAmelCase : Tuple , **UpperCAmelCase : List[str] ) -> List[str]: pass def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : int=1_5 ) -> int: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __lowerCAmelCase: List[Any] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) # Simple input __lowerCAmelCase: Union[str, Any] = '''This is a simple input''' __lowerCAmelCase: int = ['''This is a simple input 1''', '''This is a simple input 2'''] __lowerCAmelCase: Any = ('''This is a simple input''', '''This is a pair''') __lowerCAmelCase: List[Any] = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='max_length' ) # Simple input self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='max_length' ) # Simple input self.assertRaises( __lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='max_length' , ) # Pair input self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='max_length' ) # Pair input 
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='max_length' ) # Pair input self.assertRaises( __lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='max_length' , ) def UpperCAmelCase ( self : Optional[Any] ) -> List[str]: __lowerCAmelCase: Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' ) # Simple input __lowerCAmelCase: Optional[int] = '''This is a simple input''' __lowerCAmelCase: Dict = ['''This is a simple input looooooooong''', '''This is a simple input'''] __lowerCAmelCase: Any = ('''This is a simple input''', '''This is a pair''') __lowerCAmelCase: Union[str, Any] = [ ('''This is a simple input loooooong''', '''This is a simple input'''), ('''This is a simple pair loooooong''', '''This is a simple pair'''), ] __lowerCAmelCase: Tuple = tokenizer.pad_token_id __lowerCAmelCase: Optional[int] = tokenizer(__lowerCAmelCase , padding='max_length' , max_length=3_0 , return_tensors='np' ) __lowerCAmelCase: Optional[int] = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncate=__lowerCAmelCase , return_tensors='np' ) __lowerCAmelCase: List[str] = tokenizer(*__lowerCAmelCase , padding='max_length' , max_length=6_0 , return_tensors='np' ) __lowerCAmelCase: Any = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncate=__lowerCAmelCase , return_tensors='np' ) # s # test single string max_length padding self.assertEqual(out_s['input_ids'].shape[-1] , 3_0 ) self.assertTrue(pad_token_id in out_s['input_ids'] ) self.assertTrue(0 in out_s['attention_mask'] ) # s2 # test automatic padding self.assertEqual(out_sa['input_ids'].shape[-1] , 3_3 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['input_ids'][0] ) self.assertFalse(0 in out_sa['attention_mask'][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['input_ids'][1] ) self.assertTrue(0 in 
out_sa['attention_mask'][1] ) # p # test single pair max_length padding self.assertEqual(out_p['input_ids'].shape[-1] , 6_0 ) self.assertTrue(pad_token_id in out_p['input_ids'] ) self.assertTrue(0 in out_p['attention_mask'] ) # p2 # test automatic padding pair self.assertEqual(out_pa['input_ids'].shape[-1] , 5_2 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['input_ids'][0] ) self.assertFalse(0 in out_pa['attention_mask'][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['input_ids'][1] ) self.assertTrue(0 in out_pa['attention_mask'][1] ) def UpperCAmelCase ( self : Union[str, Any] ) -> int: __lowerCAmelCase: str = '''$$$''' __lowerCAmelCase: Optional[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__lowerCAmelCase , add_bos_token=__lowerCAmelCase ) __lowerCAmelCase: int = '''This is a simple input''' __lowerCAmelCase: Optional[Any] = ['''This is a simple input 1''', '''This is a simple input 2'''] __lowerCAmelCase: Optional[int] = tokenizer.bos_token_id __lowerCAmelCase: Union[str, Any] = tokenizer(__lowerCAmelCase ) __lowerCAmelCase: Union[str, Any] = tokenizer(__lowerCAmelCase ) self.assertEqual(out_s.input_ids[0] , __lowerCAmelCase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) __lowerCAmelCase: str = tokenizer.decode(out_s.input_ids ) __lowerCAmelCase: Optional[Any] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , __lowerCAmelCase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def UpperCAmelCase ( self : str ) -> Any: pass def UpperCAmelCase ( self : List[Any] ) -> List[str]: __lowerCAmelCase: Dict = [self.get_tokenizer(do_lower_case=__lowerCAmelCase , add_bos_token=__lowerCAmelCase )] for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): __lowerCAmelCase: Any = '''Encode this.''' __lowerCAmelCase: List[Any] = '''This one too please.''' __lowerCAmelCase: 
Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) encoded_sequence += tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __lowerCAmelCase: str = tokenizer.encode_plus( __lowerCAmelCase , __lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , ) __lowerCAmelCase: str = encoded_sequence_dict['''input_ids'''] __lowerCAmelCase: List[str] = encoded_sequence_dict['''special_tokens_mask'''] self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) __lowerCAmelCase: Any = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(__lowerCAmelCase ) ] __lowerCAmelCase: Optional[Any] = [x for x in filtered_sequence if x is not None] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) @require_tokenizers class A_ ( unittest.TestCase ): def UpperCAmelCase ( self : int ) -> Tuple: __lowerCAmelCase: Dict = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=__lowerCAmelCase ) __lowerCAmelCase: Tuple = '''A photo of a cat''' __lowerCAmelCase: str = tokenizer.encode( __lowerCAmelCase , ) self.assertEqual(__lowerCAmelCase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained('test_opt' ) __lowerCAmelCase: List[Any] = AutoTokenizer.from_pretrained('./test_opt' ) __lowerCAmelCase: List[str] = tokenizer.encode( __lowerCAmelCase , ) self.assertEqual(__lowerCAmelCase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) def UpperCAmelCase ( self : Optional[int] ) -> Dict: __lowerCAmelCase: int = AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=__lowerCAmelCase ) __lowerCAmelCase: int = '''A photo of a cat''' __lowerCAmelCase: Any = tokenizer.encode( __lowerCAmelCase , ) # Same as above self.assertEqual(__lowerCAmelCase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) @unittest.skip('This test is failing because of a bug in the fast tokenizer' ) def UpperCAmelCase ( self : int ) -> Tuple: __lowerCAmelCase: List[Any] = 
AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=__lowerCAmelCase ) __lowerCAmelCase: Dict = '''bos''' __lowerCAmelCase: int = tokenizer.get_vocab()['''bos'''] __lowerCAmelCase: List[Any] = '''A photo of a cat''' __lowerCAmelCase: List[str] = tokenizer.encode( __lowerCAmelCase , ) # We changed the bos token self.assertEqual(__lowerCAmelCase , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained('./tok' ) __lowerCAmelCase: Any = AutoTokenizer.from_pretrained('./tok' ) self.assertTrue(tokenizer.is_fast ) __lowerCAmelCase: int = tokenizer.encode( __lowerCAmelCase , ) self.assertEqual(__lowerCAmelCase , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
322
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _lowerCamelCase : Union[str, Any] = TextStreamer(__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCamelCase : int = cs.out[:-1] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase , 
max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : List[str] = tokenizer.decode(greedy_ids[0] ) _lowerCamelCase : Tuple = TextIteratorStreamer(__lowerCAmelCase ) _lowerCamelCase : Tuple = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase ) thread.start() _lowerCamelCase : int = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Tuple = -1 _lowerCamelCase : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : int = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = greedy_ids[:, input_ids.shape[1] :] _lowerCamelCase : int = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _lowerCamelCase : Any = TextStreamer(__lowerCAmelCase , skip_prompt=__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCamelCase : Union[str, Any] = cs.out[:-1] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''distilgpt2''' ) _lowerCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : str = -1 _lowerCamelCase : Any = torch.ones((1, 5) , device=__lowerCAmelCase 
).long() * model.config.bos_token_id with CaptureStdout() as cs: _lowerCamelCase : List[Any] = TextStreamer(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) model.generate(__lowerCAmelCase , max_new_tokens=1 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _lowerCamelCase : Any = cs.out[:-1] # Remove the final "\n" _lowerCamelCase : int = tokenizer(__lowerCAmelCase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _lowerCamelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = -1 _lowerCamelCase : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase ) _lowerCamelCase : List[str] = TextIteratorStreamer(__lowerCAmelCase , timeout=0.0_01 ) _lowerCamelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(__lowerCAmelCase ): _lowerCamelCase : Optional[Any] = '''''' for new_text in streamer: streamer_text += new_text
72
0
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ): lowerCAmelCase__ : int = [False] * len(A_ ) lowerCAmelCase__ : Union[str, Any] = [] queue.append(A_ ) lowerCAmelCase__ : str = True while queue: lowerCAmelCase__ : List[Any] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(A_ ) lowerCAmelCase__ : int = True lowerCAmelCase__ : Union[str, Any] = u return visited[t] def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ): lowerCAmelCase__ : Optional[Any] = [-1] * (len(A_ )) lowerCAmelCase__ : Optional[Any] = 0 while bfs(A_ , A_ , A_ , A_ ): lowerCAmelCase__ : Optional[Any] = float('''Inf''' ) lowerCAmelCase__ : Union[str, Any] = sink while s != source: # Find the minimum value in select path lowerCAmelCase__ : Union[str, Any] = min(A_ , graph[parent[s]][s] ) lowerCAmelCase__ : List[str] = parent[s] max_flow += path_flow lowerCAmelCase__ : List[str] = sink while v != source: lowerCAmelCase__ : Union[str, Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowerCAmelCase__ : Optional[Any] = parent[v] return max_flow __UpperCamelCase : Dict = [ [0, 1_6, 1_3, 0, 0, 0], [0, 0, 1_0, 1_2, 0, 0], [0, 4, 0, 0, 1_4, 0], [0, 0, 9, 0, 0, 2_0], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] __UpperCamelCase , __UpperCamelCase : List[str] = 0, 5 print(ford_fulkerson(graph, source, sink))
106
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCAmelCase__ = logging.get_logger(__name__) # TODO: upload to AWS lowerCAmelCase__ = { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json''' ), } class __snake_case ( _lowercase): snake_case__ : int = "retribert" def __init__( self : Optional[int] , __lowerCAmelCase : str=3_0_5_2_2 , __lowerCAmelCase : Tuple=7_6_8 , __lowerCAmelCase : Union[str, Any]=8 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=3_0_7_2 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=5_1_2 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Optional[Any]=1E-12 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any=1_2_8 , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : str , ): """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) _lowerCamelCase : Dict = vocab_size _lowerCamelCase : Union[str, Any] = hidden_size _lowerCamelCase : Dict = num_hidden_layers _lowerCamelCase : int = num_attention_heads _lowerCamelCase : int = hidden_act _lowerCamelCase : str = intermediate_size _lowerCamelCase : Union[str, Any] = hidden_dropout_prob _lowerCamelCase : List[Any] = attention_probs_dropout_prob _lowerCamelCase : Optional[int] = max_position_embeddings _lowerCamelCase : List[Any] = type_vocab_size _lowerCamelCase : Any = initializer_range _lowerCamelCase : Optional[int] = layer_norm_eps _lowerCamelCase : int = share_encoders _lowerCamelCase : Optional[Any] = projection_dim
72
0
"""A FIFO queue implemented with two LIFO stacks (amortized O(1) ops)."""
from collections.abc import Iterable
from typing import Generic, TypeVar

_T = TypeVar("_T")


class QueueByTwoStacks(Generic[_T]):
    """FIFO queue backed by two Python lists used as stacks.

    ``_stack1`` receives newly enqueued items (newest on top); ``_stack2``
    holds items in dequeue order (oldest on top).  Items are moved from
    ``_stack1`` to ``_stack2`` only when ``_stack2`` runs empty, so each
    item is moved at most once and ``put``/``get`` are amortized O(1).
    """

    def __init__(self, iterable: "Iterable[_T] | None" = None) -> None:
        # Input stack: seeded with the optional initial items.
        self._stack1: list[_T] = list(iterable or [])
        # Output stack: filled lazily by get().
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        # Present the contents in queue (FIFO) order.
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        """Enqueue ``item`` at the back of the queue."""
        self._stack1.append(item)

    def get(self) -> _T:
        """Dequeue and return the item at the front of the queue.

        Raises:
            IndexError: if the queue is empty.
        """
        # Bind the hot methods once; the transfer loop below may run
        # many iterations.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            # Refill the output stack, reversing insertion order so the
            # oldest item ends up on top.
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
37
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Union[str, Any] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 
'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : Optional[int] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : str = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] _lowerCamelCase : Optional[Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : Tuple = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] _lowerCamelCase : Any = '''fp16''' 
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] _lowerCamelCase : str = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] _lowerCamelCase : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : int = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] _lowerCamelCase : int = '''fp16''' self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
72
0
def longest_common_substring(text1: str, text2: str) -> str:
    """
    Return the longest string that occurs contiguously in both *text1* and *text2*.

    Uses O(len(text1) * len(text2)) dynamic programming where ``dp[i][j]`` is the
    length of the longest common suffix of ``text1[:i]`` and ``text2[:j]``.

    >>> longest_common_substring("xyabcz", "pqabcr")
    'abc'
    >>> longest_common_substring("abc", "def")
    ''

    :param text1: first string.
    :param text2: second string.
    :raises ValueError: if either argument is not a string.
    """
    # NOTE(review): the original (mangled) definition declared two parameters with
    # the same name and referenced an undefined ``A_`` throughout — unrunnable.
    # Names are reconstructed from the DP body; the error message below already
    # called the function ``longest_common_substring``.
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError('''longest_common_substring() takes two strings for inputs''')

    len1 = len(text1)
    len2 = len(text2)

    dp = [[0] * (len2 + 1) for _ in range(len1 + 1)]
    end_index = 0    # index into text1 just past the best match found so far
    best_length = 0  # length of the best match found so far

    for i in range(1, len1 + 1):
        for j in range(1, len2 + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > best_length:
                    end_index = i
                    best_length = dp[i][j]

    return text1[end_index - best_length : end_index]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
14
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class __snake_case : def __init__( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]=1_0 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=3_2 * 4 , __lowerCAmelCase : Dict=3_2 * 6 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[str]=3_2 , ): """simple docstring""" _lowerCamelCase : List[str] = parent _lowerCamelCase : str = batch_size _lowerCamelCase : Dict = is_training _lowerCamelCase : str = use_auxiliary_loss _lowerCamelCase : Any = num_queries _lowerCamelCase : List[Any] = num_channels _lowerCamelCase : int = min_size _lowerCamelCase : Any = max_size _lowerCamelCase : int = num_labels _lowerCamelCase : List[str] = mask_feature_size def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __lowerCAmelCase ) _lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, 
self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5 ).float() _lowerCamelCase : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long() _lowerCamelCase : Dict = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = self.prepare_config_and_inputs() _lowerCamelCase : List[str] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ): """simple docstring""" _lowerCamelCase : List[str] = output.encoder_hidden_states _lowerCamelCase : Tuple = output.pixel_decoder_hidden_states _lowerCamelCase : Dict = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers ) def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=False ): """simple docstring""" with torch.no_grad(): _lowerCamelCase : Optional[int] = MaskFormerModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() 
_lowerCamelCase : Tuple = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) _lowerCamelCase : List[str] = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" _lowerCamelCase : str = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() def comm_check_on_output(__lowerCAmelCase : Dict ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _lowerCamelCase : str = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ) 
comm_check_on_output(__lowerCAmelCase ) _lowerCamelCase : List[str] = model( pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __snake_case ( _lowercase , _lowercase , unittest.TestCase): snake_case__ : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () snake_case__ : Any = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) snake_case__ : List[str] = False snake_case__ : List[str] = False snake_case__ : Optional[int] = False snake_case__ : Dict = False def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" _lowerCamelCase : Optional[int] = MaskFormerModelTester(self ) _lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def SCREAMING_SNAKE_CASE ( self : Dict ): """simple 
docstring""" pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : str = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Dict = [*signature.parameters.keys()] _lowerCamelCase : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" for model_name in ["facebook/maskformer-swin-small-coco"]: _lowerCamelCase : Union[str, Any] = MaskFormerModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : List[Any] = (self.model_tester.min_size,) * 2 _lowerCamelCase : Union[str, Any] = { '''pixel_values''': torch.randn((2, 3, *size) , device=__lowerCAmelCase ), '''mask_labels''': torch.randn((2, 1_0, *size) , device=__lowerCAmelCase ), '''class_labels''': torch.zeros(2 , 1_0 , device=__lowerCAmelCase ).long(), } _lowerCamelCase : int = MaskFormerForInstanceSegmentation(MaskFormerConfig() 
).to(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None ) def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ).to(__lowerCAmelCase ) _lowerCamelCase : List[str] = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase ) self.assertTrue(outputs.attentions is not None ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _lowerCamelCase : Union[str, Any] = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : Any = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() _lowerCamelCase : List[Any] = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" _lowerCamelCase : int = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : List[str] = True _lowerCamelCase : Optional[int] = True _lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase , 
mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) _lowerCamelCase : List[Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCamelCase : Tuple = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _lowerCamelCase : List[str] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCamelCase : Optional[int] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__lowerCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) lowerCAmelCase__ = 1E-4 def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class __snake_case ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" _lowerCamelCase : str = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__lowerCAmelCase ) _lowerCamelCase : Any = self.default_image_processor _lowerCamelCase : List[Any] = prepare_img() _lowerCamelCase : Any = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : Any = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : int = model(**__lowerCAmelCase ) 
_lowerCamelCase : str = torch.tensor( [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) _lowerCamelCase : Union[str, Any] = torch.tensor( [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) _lowerCamelCase : Optional[int] = torch.tensor( [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" _lowerCamelCase : Optional[int] = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : str = prepare_img() _lowerCamelCase : int = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : str = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : Optional[int] = model(**__lowerCAmelCase ) # masks_queries_logits _lowerCamelCase : Optional[int] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCamelCase : List[str] = [ [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33], [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95], 
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42], ] _lowerCamelCase : Any = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits _lowerCamelCase : List[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCamelCase : str = torch.tensor( [ [1.6512E00, -5.2572E00, -3.3519E00], [3.6169E-02, -5.9025E00, -2.9313E00], [1.0766E-04, -7.7630E00, -5.1263E00], ] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Any = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : Tuple = self.default_image_processor _lowerCamelCase : Tuple = prepare_img() _lowerCamelCase : Optional[Any] = image_processor(__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) _lowerCamelCase : List[Any] = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _lowerCamelCase : Optional[Any] = model(**__lowerCAmelCase ) # masks_queries_logits _lowerCamelCase : List[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCamelCase : int = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]] _lowerCamelCase : List[Any] = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) 
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits _lowerCamelCase : Dict = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCamelCase : Any = torch.tensor( [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : str = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(__lowerCAmelCase ) .eval() ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : List[str] = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , ) _lowerCamelCase : Union[str, Any] = inputs['''pixel_values'''].to(__lowerCAmelCase ) _lowerCamelCase : Dict = [el.to(__lowerCAmelCase ) for el in inputs['''mask_labels''']] _lowerCamelCase : Optional[Any] = [el.to(__lowerCAmelCase ) for el in inputs['''class_labels''']] with torch.no_grad(): _lowerCamelCase : Tuple = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None )
72
0
'''simple docstring''' import unittest import numpy as np def __UpperCamelCase ( lowercase__ : np.ndarray, lowercase__ : np.ndarray, lowercase__ : np.ndarray, lowercase__ : np.ndarray | None = None, ): '''simple docstring''' __lowercase =np.shape(A_ ) __lowercase =np.shape(A_ ) __lowercase =np.shape(A_ ) if shape_a[0] != shape_b[0]: __lowercase =( '''Expected the same number of rows for A and B. ''' F'''Instead found A of size {shape_a} and B of size {shape_b}''' ) raise ValueError(A_ ) if shape_b[1] != shape_c[1]: __lowercase =( '''Expected the same number of columns for B and C. ''' F'''Instead found B of size {shape_b} and C of size {shape_c}''' ) raise ValueError(A_ ) __lowercase =pseudo_inv if a_inv is None: try: __lowercase =np.linalg.inv(A_ ) except np.linalg.LinAlgError: raise ValueError( 'Input matrix A is not invertible. Cannot compute Schur complement.' ) return mat_c - mat_b.T @ a_inv @ mat_b class lowerCAmelCase ( unittest.TestCase ): def snake_case ( self : Any ): """simple docstring""" __lowercase =np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) __lowercase =np.array([[0, 3], [3, 0], [2, 3]] ) __lowercase =np.array([[2, 1], [6, 3]] ) __lowercase =schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) __lowercase =np.block([[a, b], [b.T, c]] ) __lowercase =np.linalg.det(__lowerCAmelCase ) __lowercase =np.linalg.det(__lowerCAmelCase ) __lowercase =np.linalg.det(__lowerCAmelCase ) self.assertAlmostEqual(__lowerCAmelCase , det_a * det_s ) def snake_case ( self : Optional[Any] ): """simple docstring""" __lowercase =np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) __lowercase =np.array([[0, 3], [3, 0], [2, 3]] ) __lowercase =np.array([[2, 1], [6, 3]] ) with self.assertRaises(__lowerCAmelCase ): schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def snake_case ( self : List[str] ): """simple docstring""" __lowercase =np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) __lowercase =np.array([[0, 3], [3, 0], [2, 3]] ) __lowercase 
=np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(__lowerCAmelCase ): schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
141
"""simple docstring""" lowerCAmelCase__ = { '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } def snake_case_ ( A_ : dict, A_ : int, A_ : int ): '''simple docstring''' _lowerCamelCase : List[str] = set() # keep track of all the paths to be checked _lowerCamelCase : str = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue _lowerCamelCase : str = queue.pop(0 ) # get the last node from the path _lowerCamelCase : List[Any] = path[-1] if node not in explored: _lowerCamelCase : Union[str, Any] = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: _lowerCamelCase : Union[str, Any] = list(A_ ) new_path.append(A_ ) queue.append(A_ ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(A_ ) # in case there's no path between the 2 nodes return [] def snake_case_ ( A_ : dict, A_ : int, A_ : Dict ): '''simple docstring''' if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 _lowerCamelCase : Optional[int] = [start] _lowerCamelCase : int = set(A_ ) # Keep tab on distances from `start` node. 
_lowerCamelCase : int = {start: 0, target: -1} while queue: _lowerCamelCase : Optional[Any] = queue.pop(0 ) if node == target: _lowerCamelCase : Any = ( dist[node] if dist[target] == -1 else min(dist[target], dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(A_ ) queue.append(A_ ) _lowerCamelCase : Any = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
72
0