code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar _UpperCamelCase = TypeVar("""T""") class lowerCamelCase__ ( Generic[T] ): def __init__( self ,A ): UpperCAmelCase = data UpperCAmelCase = None def __str__( self ): return F'''{self.data}''' class lowerCamelCase__ ( Generic[T] ): def __init__( self ): UpperCAmelCase = None def __iter__( self ): UpperCAmelCase = self.top while node: yield node.data UpperCAmelCase = node.next def __str__( self ): return "->".join([str(A ) for item in self] ) def __len__( self ): return len(tuple(iter(self ) ) ) def _UpperCamelCase ( self ): return self.top is None def _UpperCamelCase ( self ,A ): UpperCAmelCase = Node(A ) if not self.is_empty(): UpperCAmelCase = self.top UpperCAmelCase = node def _UpperCamelCase ( self ): if self.is_empty(): raise IndexError("""pop from empty stack""" ) assert isinstance(self.top ,A ) UpperCAmelCase = self.top UpperCAmelCase = self.top.next return pop_node.data def _UpperCamelCase ( self ): if self.is_empty(): raise IndexError("""peek from empty stack""" ) assert self.top is not None return self.top.data def _UpperCamelCase ( self ): UpperCAmelCase = None if __name__ == "__main__": from doctest import testmod testmod()
711
"""simple docstring""" from __future__ import annotations from collections.abc import MutableSequence class lowerCamelCase__ : def __init__( self ,A ,A ): if len(A ) != degree + 1: raise ValueError( """The number of coefficients should be equal to the degree + 1.""" ) UpperCAmelCase = list(A ) UpperCAmelCase = degree def __add__( self ,A ): if self.degree > polynomial_a.degree: UpperCAmelCase = self.coefficients[:] for i in range(polynomial_a.degree + 1 ): coefficients[i] += polynomial_a.coefficients[i] return Polynomial(self.degree ,A ) else: UpperCAmelCase = polynomial_a.coefficients[:] for i in range(self.degree + 1 ): coefficients[i] += self.coefficients[i] return Polynomial(polynomial_a.degree ,A ) def __sub__( self ,A ): return self + polynomial_a * Polynomial(0 ,[-1] ) def __neg__( self ): return Polynomial(self.degree ,[-c for c in self.coefficients] ) def __mul__( self ,A ): UpperCAmelCase = [0] * (self.degree + polynomial_a.degree + 1) for i in range(self.degree + 1 ): for j in range(polynomial_a.degree + 1 ): coefficients[i + j] += ( self.coefficients[i] * polynomial_a.coefficients[j] ) return Polynomial(self.degree + polynomial_a.degree ,A ) def _UpperCamelCase ( self ,A ): UpperCAmelCase = 0 for i in range(self.degree + 1 ): result += self.coefficients[i] * (substitution**i) return result def __str__( self ): UpperCAmelCase = """""" for i in range(self.degree ,-1 ,-1 ): if self.coefficients[i] == 0: continue elif self.coefficients[i] > 0: if polynomial: polynomial += " + " else: polynomial += " - " if i == 0: polynomial += str(abs(self.coefficients[i] ) ) elif i == 1: polynomial += str(abs(self.coefficients[i] ) ) + "x" else: polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(A ) return polynomial def __repr__( self ): return self.__str__() def _UpperCamelCase ( self ): UpperCAmelCase = [0] * self.degree for i in range(self.degree ): UpperCAmelCase = self.coefficients[i + 1] * (i + 1) return Polynomial(self.degree - 1 ,A ) def _UpperCamelCase 
( self ,A = 0 ): UpperCAmelCase = [0] * (self.degree + 2) UpperCAmelCase = constant for i in range(self.degree + 1 ): UpperCAmelCase = self.coefficients[i] / (i + 1) return Polynomial(self.degree + 1 ,A ) def __eq__( self ,A ): if not isinstance(A ,A ): return False if self.degree != polynomial_a.degree: return False for i in range(self.degree + 1 ): if self.coefficients[i] != polynomial_a.coefficients[i]: return False return True def __ne__( self ,A ): return not self.__eq__(A )
74
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFXLMRobertaModel @require_tf @require_sentencepiece @require_tokenizers class lowerCamelCase__ ( unittest.TestCase ): @slow def _UpperCamelCase ( self ): UpperCAmelCase = TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" ) UpperCAmelCase = { """input_ids""": tf.convert_to_tensor([[0, 2_646, 10_269, 83, 99_942, 2]] ,dtype=tf.intaa ), # "My dog is cute" """attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] ,dtype=tf.intaa ), } UpperCAmelCase = model(A )["""last_hidden_state"""] UpperCAmelCase = tf.TensorShape((1, 6, 768) ) self.assertEqual(output.shape ,A ) # compare the actual values for a slice. UpperCAmelCase = tf.convert_to_tensor( [ [ [0.0681762, 0.10894451, 0.06772504], [-0.06423668, 0.02366615, 0.04329344], [-0.06057295, 0.09974135, -0.00070584], ] ] ,dtype=tf.floataa ,) self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
712
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class lowerCamelCase__ : def __init__( self ,A ,): UpperCAmelCase = parent UpperCAmelCase = 13 UpperCAmelCase = 7 UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = 2 UpperCAmelCase = 99 UpperCAmelCase = 0 UpperCAmelCase = 32 UpperCAmelCase = 2 UpperCAmelCase = 4 UpperCAmelCase = 0.1 UpperCAmelCase = 0.1 UpperCAmelCase = 512 UpperCAmelCase = 16 UpperCAmelCase = 2 UpperCAmelCase = 0.02 UpperCAmelCase = 3 UpperCAmelCase = 4 UpperCAmelCase = """last""" UpperCAmelCase = True UpperCAmelCase = None UpperCAmelCase = 0 def _UpperCamelCase ( self ): UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ,dtype=tf.floataa ) UpperCAmelCase = None if self.use_input_lengths: UpperCAmelCase = ( ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length UpperCAmelCase = None if self.use_token_type_ids: UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs ) UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = None if 
self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) UpperCAmelCase = ids_tensor([self.batch_size] ,2 ,dtype=tf.floataa ) UpperCAmelCase = ids_tensor([self.batch_size] ,self.num_choices ) UpperCAmelCase = FlaubertConfig( vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,bos_token_id=self.bos_token_id ,) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = TFFlaubertModel(config=A ) UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids} UpperCAmelCase = model(A ) UpperCAmelCase = [input_ids, input_mask] UpperCAmelCase = model(A ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = TFFlaubertWithLMHeadModel(A ) UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids} UpperCAmelCase = model(A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = TFFlaubertForQuestionAnsweringSimple(A ) UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths} 
UpperCAmelCase = model(A ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = TFFlaubertForSequenceClassification(A ) UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths} UpperCAmelCase = model(A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = self.num_labels UpperCAmelCase = TFFlaubertForTokenClassification(config=A ) UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} UpperCAmelCase = model(A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = self.num_choices UpperCAmelCase = TFFlaubertForMultipleChoice(config=A ) UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) ) UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) ) UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) ) UpperCAmelCase = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } UpperCAmelCase = model(A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _UpperCamelCase ( self ): UpperCAmelCase = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) = config_and_inputs UpperCAmelCase = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """langs""": token_type_ids, """lengths""": 
input_lengths, } return config, inputs_dict @require_tf class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable SCREAMING_SNAKE_CASE = ( { '''feature-extraction''': TFFlaubertModel, '''fill-mask''': TFFlaubertWithLMHeadModel, '''question-answering''': TFFlaubertForQuestionAnsweringSimple, '''text-classification''': TFFlaubertForSequenceClassification, '''token-classification''': TFFlaubertForTokenClassification, '''zero-shot''': TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def _UpperCamelCase ( self ,A ,A ,A ,A ,A ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _UpperCamelCase ( self ): UpperCAmelCase = TFFlaubertModelTester(self ) UpperCAmelCase = ConfigTester(self ,config_class=A ,emb_dim=37 ) def _UpperCamelCase ( self ): self.config_tester.run_common_tests() def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*A ) @slow def _UpperCamelCase ( self ): for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase = TFFlaubertModel.from_pretrained(A ) self.assertIsNotNone(A ) @require_tf @require_sentencepiece @require_tokenizers class lowerCamelCase__ ( unittest.TestCase ): @slow def _UpperCamelCase ( self ): UpperCAmelCase = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" ) UpperCAmelCase = tf.convert_to_tensor( [[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] ,dtype=tf.intaa ,) # "J'aime flaubert !" 
UpperCAmelCase = model(A )[0] UpperCAmelCase = tf.TensorShape((1, 8, 512) ) self.assertEqual(output.shape ,A ) # compare the actual values for a slice. UpperCAmelCase = tf.convert_to_tensor( [ [ [-1.8768773, -1.566555, 0.27072418], [-1.6920038, -0.5873505, 1.9329599], [-2.9563985, -1.6993835, 1.7972052], ] ] ,dtype=tf.floataa ,) self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
74
0
"""simple docstring""" from abc import ABC, abstractmethod from typing import List, Optional class lowerCamelCase__ ( snake_case ): def __init__( self ): # test for the above condition self.test() def _UpperCamelCase ( self ): UpperCAmelCase = 0 UpperCAmelCase = False while not completed: if counter == 1: self.reset() UpperCAmelCase = self.advance() if not self.does_advance(A ): raise Exception( """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.update(A ) counter += 1 if counter > 10_000: raise Exception("""update() does not fulfill the constraint.""" ) if self.remaining() != 0: raise Exception("""Custom Constraint is not defined correctly.""" ) @abstractmethod def _UpperCamelCase ( self ): raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def _UpperCamelCase ( self ,A ): raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def _UpperCamelCase ( self ,A ): raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def _UpperCamelCase ( self ): raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def _UpperCamelCase ( self ): raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def _UpperCamelCase ( self ,A=False ): raise NotImplementedError( F'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class lowerCamelCase__ ( snake_case ): def __init__( self ,A ): super(A ,self ).__init__() if not isinstance(A ,A ) or len(A ) == 0: raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(A ,A ) or token_id < 0) for token_id in token_ids ): raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) UpperCAmelCase = token_ids UpperCAmelCase = len(self.token_ids ) UpperCAmelCase = -1 # the index of the currently fulfilled step UpperCAmelCase = False def _UpperCamelCase ( self ): if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def _UpperCamelCase ( self ,A ): if not isinstance(A ,A ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def _UpperCamelCase ( self ,A ): if not isinstance(A ,A ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(A )}''' ) UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False if self.does_advance(A ): self.fulfilled_idx += 1 UpperCAmelCase = True if self.fulfilled_idx == (self.seqlen - 1): UpperCAmelCase = True UpperCAmelCase = completed else: # failed to make progress. 
UpperCAmelCase = True self.reset() return stepped, completed, reset def _UpperCamelCase ( self ): UpperCAmelCase = False UpperCAmelCase = 0 def _UpperCamelCase ( self ): return self.seqlen - (self.fulfilled_idx + 1) def _UpperCamelCase ( self ,A=False ): UpperCAmelCase = PhrasalConstraint(self.token_ids ) if stateful: UpperCAmelCase = self.seqlen UpperCAmelCase = self.fulfilled_idx UpperCAmelCase = self.completed return new_constraint class lowerCamelCase__ : def __init__( self ,A ,A=True ): UpperCAmelCase = max([len(A ) for one in nested_token_ids] ) UpperCAmelCase = {} for token_ids in nested_token_ids: UpperCAmelCase = root for tidx, token_id in enumerate(A ): if token_id not in level: UpperCAmelCase = {} UpperCAmelCase = level[token_id] if no_subsets and self.has_subsets(A ,A ): raise ValueError( """Each list in `nested_token_ids` can't be a complete subset of another list, but is""" F''' {nested_token_ids}.''' ) UpperCAmelCase = root def _UpperCamelCase ( self ,A ): UpperCAmelCase = self.trie for current_token in current_seq: UpperCAmelCase = start[current_token] UpperCAmelCase = list(start.keys() ) return next_tokens def _UpperCamelCase ( self ,A ): UpperCAmelCase = self.next_tokens(A ) return len(A ) == 0 def _UpperCamelCase ( self ,A ): UpperCAmelCase = list(root.values() ) if len(A ) == 0: return 1 else: return sum([self.count_leaves(A ) for nn in next_nodes] ) def _UpperCamelCase ( self ,A ,A ): UpperCAmelCase = self.count_leaves(A ) return len(A ) != leaf_count class lowerCamelCase__ ( snake_case ): def __init__( self ,A ): super(A ,self ).__init__() if not isinstance(A ,A ) or len(A ) == 0: raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(A ,A ) for token_ids in nested_token_ids ): raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(A ,A ) or token_id < 0) for token_id in token_ids ) for token_ids in 
nested_token_ids ): raise ValueError( F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) UpperCAmelCase = DisjunctiveTrie(A ) UpperCAmelCase = nested_token_ids UpperCAmelCase = self.trie.max_height UpperCAmelCase = [] UpperCAmelCase = False def _UpperCamelCase ( self ): UpperCAmelCase = self.trie.next_tokens(self.current_seq ) if len(A ) == 0: return None else: return token_list def _UpperCamelCase ( self ,A ): if not isinstance(A ,A ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A )}''' ) UpperCAmelCase = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def _UpperCamelCase ( self ,A ): if not isinstance(A ,A ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(A )}''' ) UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False if self.does_advance(A ): self.current_seq.append(A ) UpperCAmelCase = True else: UpperCAmelCase = True self.reset() UpperCAmelCase = self.trie.reached_leaf(self.current_seq ) UpperCAmelCase = completed return stepped, completed, reset def _UpperCamelCase ( self ): UpperCAmelCase = False UpperCAmelCase = [] def _UpperCamelCase ( self ): if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def _UpperCamelCase ( self ,A=False ): UpperCAmelCase = DisjunctiveConstraint(self.token_ids ) if stateful: UpperCAmelCase = self.seqlen UpperCAmelCase = self.current_seq UpperCAmelCase = self.completed return new_constraint class lowerCamelCase__ : def __init__( self ,A ): UpperCAmelCase = constraints # max # of steps required to fulfill a given constraint UpperCAmelCase = max([c.seqlen for c in constraints] ) UpperCAmelCase = len(A ) UpperCAmelCase = False self.init_state() def _UpperCamelCase ( self ): UpperCAmelCase = [] UpperCAmelCase = None UpperCAmelCase = [constraint.copy(stateful=A ) 
for constraint in self.constraints] def _UpperCamelCase ( self ): UpperCAmelCase = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def _UpperCamelCase ( self ): UpperCAmelCase = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" UpperCAmelCase = constraint.advance() if isinstance(A ,A ): token_list.append(A ) elif isinstance(A ,A ): token_list.extend(A ) else: UpperCAmelCase = self.inprogress_constraint.advance() if isinstance(A ,A ): token_list.append(A ) elif isinstance(A ,A ): token_list.extend(A ) if len(A ) == 0: return None else: return token_list def _UpperCamelCase ( self ,A ): self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint UpperCAmelCase , UpperCAmelCase = self.add(A ) # the entire list of constraints are fulfilled if self.completed: break def _UpperCamelCase ( self ,A ): if not isinstance(A ,A ): raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' ) UpperCAmelCase , UpperCAmelCase = False, False if self.completed: UpperCAmelCase = True UpperCAmelCase = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.inprogress_constraint.update(A ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. 
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=A ) ) UpperCAmelCase = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) UpperCAmelCase = None if len(self.pending_constraints ) == 0: # we're done! UpperCAmelCase = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(A ): UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = pending_constraint.update(A ) if not stepped: raise Exception( """`constraint.update(token_id)` is not yielding incremental progress, """ """even though `constraint.does_advance(token_id)` is true.""" ) if complete: self.complete_constraints.append(A ) UpperCAmelCase = None if not complete and stepped: UpperCAmelCase = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". UpperCAmelCase = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. UpperCAmelCase = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def _UpperCamelCase ( self ,A=True ): UpperCAmelCase = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. 
if stateful: UpperCAmelCase = [ constraint.copy(stateful=A ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: UpperCAmelCase = self.inprogress_constraint.copy(stateful=A ) UpperCAmelCase = [constraint.copy() for constraint in self.pending_constraints] return new_state
713
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) def _a ( _snake_case ): """simple docstring""" UpperCAmelCase = OrderedDict() for key, value in state_dict.items(): if key.startswith("""module.encoder""" ): UpperCAmelCase = key.replace("""module.encoder""" , """glpn.encoder""" ) if key.startswith("""module.decoder""" ): UpperCAmelCase = key.replace("""module.decoder""" , """decoder.stages""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 UpperCAmelCase = key[key.find("""patch_embed""" ) + len("""patch_embed""" )] UpperCAmelCase = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(_snake_case )-1}''' ) if "norm" in key: UpperCAmelCase = key.replace("""norm""" , """layer_norm""" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 UpperCAmelCase = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )] UpperCAmelCase = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(_snake_case )-1}''' ) if "layer_norm1" in key: UpperCAmelCase = key.replace("""layer_norm1""" , """layer_norm_1""" ) if "layer_norm2" in key: UpperCAmelCase = key.replace("""layer_norm2""" , """layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 UpperCAmelCase = key[key.find("""block""" ) + len("""block""" )] UpperCAmelCase = key.replace(F'''block{idx}''' , F'''block.{int(_snake_case )-1}''' ) if "attn.q" in key: UpperCAmelCase = key.replace("""attn.q""" , """attention.self.query""" ) if "attn.proj" in key: UpperCAmelCase = key.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in key: UpperCAmelCase = key.replace("""attn""" , """attention.self""" ) if 
"fc1" in key: UpperCAmelCase = key.replace("""fc1""" , """dense1""" ) if "fc2" in key: UpperCAmelCase = key.replace("""fc2""" , """dense2""" ) if "linear_pred" in key: UpperCAmelCase = key.replace("""linear_pred""" , """classifier""" ) if "linear_fuse" in key: UpperCAmelCase = key.replace("""linear_fuse.conv""" , """linear_fuse""" ) UpperCAmelCase = key.replace("""linear_fuse.bn""" , """batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 UpperCAmelCase = key[key.find("""linear_c""" ) + len("""linear_c""" )] UpperCAmelCase = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(_snake_case )-1}''' ) if "bot_conv" in key: UpperCAmelCase = key.replace("""bot_conv""" , """0.convolution""" ) if "skip_conv1" in key: UpperCAmelCase = key.replace("""skip_conv1""" , """1.convolution""" ) if "skip_conv2" in key: UpperCAmelCase = key.replace("""skip_conv2""" , """2.convolution""" ) if "fusion1" in key: UpperCAmelCase = key.replace("""fusion1""" , """1.fusion""" ) if "fusion2" in key: UpperCAmelCase = key.replace("""fusion2""" , """2.fusion""" ) if "fusion3" in key: UpperCAmelCase = key.replace("""fusion3""" , """3.fusion""" ) if "fusion" in key and "conv" in key: UpperCAmelCase = key.replace("""conv""" , """convolutional_layer""" ) if key.startswith("""module.last_layer_depth""" ): UpperCAmelCase = key.replace("""module.last_layer_depth""" , """head.head""" ) UpperCAmelCase = value return new_state_dict def _a ( _snake_case , _snake_case ): """simple docstring""" for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) UpperCAmelCase = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' ) UpperCAmelCase = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict UpperCAmelCase = kv_weight[ : config.hidden_sizes[i], : ] 
UpperCAmelCase = kv_bias[: config.hidden_sizes[i]] UpperCAmelCase = kv_weight[ config.hidden_sizes[i] :, : ] UpperCAmelCase = kv_bias[config.hidden_sizes[i] :] def _a ( ): """simple docstring""" UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ) return image @torch.no_grad() def _a ( _snake_case , _snake_case , _snake_case=False , _snake_case=None ): """simple docstring""" UpperCAmelCase = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) UpperCAmelCase = GLPNImageProcessor() # prepare image UpperCAmelCase = prepare_img() UpperCAmelCase = image_processor(images=_snake_case , return_tensors="""pt""" ).pixel_values logger.info("""Converting model...""" ) # load original state dict UpperCAmelCase = torch.load(_snake_case , map_location=torch.device("""cpu""" ) ) # rename keys UpperCAmelCase = rename_keys(_snake_case ) # key and value matrices need special treatment read_in_k_v(_snake_case , _snake_case ) # create HuggingFace model and load state dict UpperCAmelCase = GLPNForDepthEstimation(_snake_case ) model.load_state_dict(_snake_case ) model.eval() # forward pass UpperCAmelCase = model(_snake_case ) UpperCAmelCase = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: UpperCAmelCase = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: UpperCAmelCase = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(F'''Unknown model name: {model_name}''' ) UpperCAmelCase = torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , _snake_case , atol=1E-4 ) print("""Looks ok!""" ) # finally, push to hub if required if push_to_hub: 
logger.info("""Pushing model and image processor to the hub...""" ) model.push_to_hub( repo_path_or_name=Path(_snake_case , _snake_case ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=_snake_case , ) image_processor.push_to_hub( repo_path_or_name=Path(_snake_case , _snake_case ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=_snake_case , ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) parser.add_argument( """--model_name""", default="""glpn-kitti""", type=str, help="""Name of the model in case you're pushing to the hub.""", ) _UpperCamelCase = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
74
0
"""simple docstring""" _UpperCamelCase = """2.13.1""" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("""3.7"""): raise ImportWarning( """To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition.""" ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( """To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n""" """If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.""" ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip _UpperCamelCase = concatenate_datasets 
_UpperCamelCase = DownloadConfig _UpperCamelCase = DownloadManager _UpperCamelCase = DownloadMode _UpperCamelCase = DownloadConfig _UpperCamelCase = DownloadMode _UpperCamelCase = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
714
"""simple docstring""" def _a ( _snake_case ): # noqa: E741 """simple docstring""" UpperCAmelCase = len(_snake_case ) UpperCAmelCase = 0 UpperCAmelCase = [0] * n UpperCAmelCase = [False] * n UpperCAmelCase = [False] * n def dfs(_snake_case , _snake_case , _snake_case , _snake_case ): if parent == root: out_edge_count += 1 UpperCAmelCase = True UpperCAmelCase = at for to in l[at]: if to == parent: pass elif not visited[to]: UpperCAmelCase = dfs(_snake_case , _snake_case , _snake_case , _snake_case ) UpperCAmelCase = min(low[at] , low[to] ) # AP found via bridge if at < low[to]: UpperCAmelCase = True # AP found via cycle if at == low[to]: UpperCAmelCase = True else: UpperCAmelCase = min(low[at] , _snake_case ) return out_edge_count for i in range(_snake_case ): if not visited[i]: UpperCAmelCase = 0 UpperCAmelCase = dfs(_snake_case , _snake_case , -1 , _snake_case ) UpperCAmelCase = out_edge_count > 1 for x in range(len(_snake_case ) ): if is_art[x] is True: print(_snake_case ) # Adjacency list of graph _UpperCamelCase = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], } compute_ap(data)
74
0
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE = IFPipeline SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS SCREAMING_SNAKE_CASE = PipelineTesterMixin.required_optional_params - {'''latents'''} def _UpperCamelCase ( self ): return self._get_dummy_components() def _UpperCamelCase ( self ,A ,A=0 ): if str(A ).startswith("""mps""" ): UpperCAmelCase = torch.manual_seed(A ) else: UpperCAmelCase = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def _UpperCamelCase ( self ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" ,reason="""float16 requires CUDA""" ) def _UpperCamelCase ( self ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def _UpperCamelCase ( self ): self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def _UpperCamelCase ( self ): self._test_save_load_local() def _UpperCamelCase ( self ): 
self._test_inference_batch_single_identical( expected_max_diff=1e-2 ,) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,) def _UpperCamelCase ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): def _UpperCamelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCamelCase ( self ): # if UpperCAmelCase = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" ,variant="""fp16""" ,torch_dtype=torch.floataa ) UpperCAmelCase = IFSuperResolutionPipeline.from_pretrained( """DeepFloyd/IF-II-L-v1.0""" ,variant="""fp16""" ,torch_dtype=torch.floataa ,text_encoder=A ,tokenizer=A ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("""cuda""" ) UpperCAmelCase , UpperCAmelCase = pipe_a.encode_prompt("""anime turtle""" ,device="""cuda""" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() UpperCAmelCase = None UpperCAmelCase = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(A ,A ,A ,A ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img UpperCAmelCase = IFImgaImgPipeline(**pipe_a.components ) UpperCAmelCase = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(A ,A ,A ,A ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting UpperCAmelCase = IFInpaintingPipeline(**pipe_a.components ) UpperCAmelCase = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() 
pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(A ,A ,A ,A ) def _UpperCamelCase ( self ,A ,A ,A ,A ): # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) UpperCAmelCase = pipe_a( prompt_embeds=A ,negative_prompt_embeds=A ,num_inference_steps=2 ,generator=A ,output_type="""np""" ,) UpperCAmelCase = output.images[0] assert image.shape == (64, 64, 3) UpperCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 UpperCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" ) assert_mean_pixel_difference(A ,A ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) UpperCAmelCase = floats_tensor((1, 3, 64, 64) ,rng=random.Random(0 ) ).to(A ) UpperCAmelCase = pipe_a( prompt_embeds=A ,negative_prompt_embeds=A ,image=A ,generator=A ,num_inference_steps=2 ,output_type="""np""" ,) UpperCAmelCase = output.images[0] assert image.shape == (256, 256, 3) UpperCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 UpperCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(A ,A ) def _UpperCamelCase ( self ,A ,A ,A ,A ): # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase = floats_tensor((1, 3, 64, 64) ,rng=random.Random(0 ) ).to(A ) UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) UpperCAmelCase = pipe_a( prompt_embeds=A ,negative_prompt_embeds=A ,image=A ,num_inference_steps=2 ,generator=A ,output_type="""np""" ,) UpperCAmelCase = output.images[0] assert image.shape == (64, 64, 3) UpperCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 UpperCAmelCase 
= load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" ) assert_mean_pixel_difference(A ,A ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) UpperCAmelCase = floats_tensor((1, 3, 256, 256) ,rng=random.Random(0 ) ).to(A ) UpperCAmelCase = floats_tensor((1, 3, 64, 64) ,rng=random.Random(0 ) ).to(A ) UpperCAmelCase = pipe_a( prompt_embeds=A ,negative_prompt_embeds=A ,image=A ,original_image=A ,generator=A ,num_inference_steps=2 ,output_type="""np""" ,) UpperCAmelCase = output.images[0] assert image.shape == (256, 256, 3) UpperCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 UpperCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(A ,A ) def _UpperCamelCase ( self ,A ,A ,A ,A ): # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase = floats_tensor((1, 3, 64, 64) ,rng=random.Random(0 ) ).to(A ) UpperCAmelCase = floats_tensor((1, 3, 64, 64) ,rng=random.Random(1 ) ).to(A ) UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) UpperCAmelCase = pipe_a( prompt_embeds=A ,negative_prompt_embeds=A ,image=A ,mask_image=A ,num_inference_steps=2 ,generator=A ,output_type="""np""" ,) UpperCAmelCase = output.images[0] assert image.shape == (64, 64, 3) UpperCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 UpperCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" ) assert_mean_pixel_difference(A ,A ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) UpperCAmelCase = floats_tensor((1, 3, 64, 64) ,rng=random.Random(0 ) ).to(A ) UpperCAmelCase = floats_tensor((1, 3, 256, 256) ,rng=random.Random(0 
) ).to(A ) UpperCAmelCase = floats_tensor((1, 3, 256, 256) ,rng=random.Random(1 ) ).to(A ) UpperCAmelCase = pipe_a( prompt_embeds=A ,negative_prompt_embeds=A ,image=A ,mask_image=A ,original_image=A ,generator=A ,num_inference_steps=2 ,output_type="""np""" ,) UpperCAmelCase = output.images[0] assert image.shape == (256, 256, 3) UpperCAmelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 UpperCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(A ,A ) def _a ( ): """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
715
"""simple docstring""" _UpperCamelCase = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ _UpperCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}] _UpperCamelCase = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
74
0
"""simple docstring""" def _a ( _snake_case ): """simple docstring""" UpperCAmelCase = [0] * len(_snake_case ) for i in range(1 , len(_snake_case ) ): # use last results for better performance - dynamic programming UpperCAmelCase = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: UpperCAmelCase = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 UpperCAmelCase = j return prefix_result def _a ( _snake_case ): """simple docstring""" return max(prefix_function(_snake_case ) ) if __name__ == "__main__": import doctest doctest.testmod()
716
"""simple docstring""" import argparse import struct import unittest class lowerCamelCase__ : def __init__( self ,A ): UpperCAmelCase = data # Initialize hash values UpperCAmelCase = [ 0x6A_09_E6_67, 0xBB_67_AE_85, 0x3C_6E_F3_72, 0xA5_4F_F5_3A, 0x51_0E_52_7F, 0x9B_05_68_8C, 0x1F_83_D9_AB, 0x5B_E0_CD_19, ] # Initialize round constants UpperCAmelCase = [ 0x42_8A_2F_98, 0x71_37_44_91, 0xB5_C0_FB_CF, 0xE9_B5_DB_A5, 0x39_56_C2_5B, 0x59_F1_11_F1, 0x92_3F_82_A4, 0xAB_1C_5E_D5, 0xD8_07_AA_98, 0x12_83_5B_01, 0x24_31_85_BE, 0x55_0C_7D_C3, 0x72_BE_5D_74, 0x80_DE_B1_FE, 0x9B_DC_06_A7, 0xC1_9B_F1_74, 0xE4_9B_69_C1, 0xEF_BE_47_86, 0x0F_C1_9D_C6, 0x24_0C_A1_CC, 0x2D_E9_2C_6F, 0x4A_74_84_AA, 0x5C_B0_A9_DC, 0x76_F9_88_DA, 0x98_3E_51_52, 0xA8_31_C6_6D, 0xB0_03_27_C8, 0xBF_59_7F_C7, 0xC6_E0_0B_F3, 0xD5_A7_91_47, 0x06_CA_63_51, 0x14_29_29_67, 0x27_B7_0A_85, 0x2E_1B_21_38, 0x4D_2C_6D_FC, 0x53_38_0D_13, 0x65_0A_73_54, 0x76_6A_0A_BB, 0x81_C2_C9_2E, 0x92_72_2C_85, 0xA2_BF_E8_A1, 0xA8_1A_66_4B, 0xC2_4B_8B_70, 0xC7_6C_51_A3, 0xD1_92_E8_19, 0xD6_99_06_24, 0xF4_0E_35_85, 0x10_6A_A0_70, 0x19_A4_C1_16, 0x1E_37_6C_08, 0x27_48_77_4C, 0x34_B0_BC_B5, 0x39_1C_0C_B3, 0x4E_D8_AA_4A, 0x5B_9C_CA_4F, 0x68_2E_6F_F3, 0x74_8F_82_EE, 0x78_A5_63_6F, 0x84_C8_78_14, 0x8C_C7_02_08, 0x90_BE_FF_FA, 0xA4_50_6C_EB, 0xBE_F9_A3_F7, 0xC6_71_78_F2, ] UpperCAmelCase = self.preprocessing(self.data ) self.final_hash() @staticmethod def _UpperCamelCase ( A ): UpperCAmelCase = b"""\x80""" + (b"""\x00""" * (63 - (len(A ) + 8) % 64)) UpperCAmelCase = struct.pack(""">Q""" ,(len(A ) * 8) ) return data + padding + big_endian_integer def _UpperCamelCase ( self ): # Convert into blocks of 64 bytes UpperCAmelCase = [ self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers UpperCAmelCase = list(struct.unpack(""">16L""" ,A ) ) # add 48 0-ed integers words += [0] * 48 UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , 
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.hashes for index in range(0 ,64 ): if index > 15: # modify the zero-ed indexes at the end of the array UpperCAmelCase = ( self.ror(words[index - 15] ,7 ) ^ self.ror(words[index - 15] ,18 ) ^ (words[index - 15] >> 3) ) UpperCAmelCase = ( self.ror(words[index - 2] ,17 ) ^ self.ror(words[index - 2] ,19 ) ^ (words[index - 2] >> 10) ) UpperCAmelCase = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_00_00_00_00 # Compression UpperCAmelCase = self.ror(A ,6 ) ^ self.ror(A ,11 ) ^ self.ror(A ,25 ) UpperCAmelCase = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g) UpperCAmelCase = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_00_00_00_00 UpperCAmelCase = self.ror(A ,2 ) ^ self.ror(A ,13 ) ^ self.ror(A ,22 ) UpperCAmelCase = (a & b) ^ (a & c) ^ (b & c) UpperCAmelCase = (sa + maj) % 0x1_00_00_00_00 UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = ( g, f, e, ((d + tempa) % 0x1_00_00_00_00), c, b, a, ((tempa + tempa) % 0x1_00_00_00_00), ) UpperCAmelCase = [a, b, c, d, e, f, g, h] # Modify final values UpperCAmelCase = [ ((element + mutated_hash_values[index]) % 0x1_00_00_00_00) for index, element in enumerate(self.hashes ) ] UpperCAmelCase = """""".join([hex(A )[2:].zfill(8 ) for value in self.hashes] ) def _UpperCamelCase ( self ,A ,A ): return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations) class lowerCamelCase__ ( unittest.TestCase ): def _UpperCamelCase ( self ): import hashlib UpperCAmelCase = bytes("""Test String""" ,"""utf-8""" ) self.assertEqual(SHAaaa(A ).hash ,hashlib.shaaaa(A ).hexdigest() ) def _a ( ): """simple docstring""" import doctest doctest.testmod() UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! 
Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument( """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) UpperCAmelCase = parser.parse_args() UpperCAmelCase = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: UpperCAmelCase = f.read() else: UpperCAmelCase = bytes(_snake_case , """utf-8""" ) print(SHAaaa(_snake_case ).hash ) if __name__ == "__main__": main()
74
0
"""simple docstring""" from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter _UpperCamelCase = """Create a default config file for Accelerate with only a few flags set.""" def _a ( _snake_case="no" , _snake_case = default_json_config_file , _snake_case = False ): """simple docstring""" UpperCAmelCase = Path(_snake_case ) path.parent.mkdir(parents=_snake_case , exist_ok=_snake_case ) if path.exists(): print( F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' ) return False UpperCAmelCase = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' ) UpperCAmelCase = { """compute_environment""": """LOCAL_MACHINE""", """mixed_precision""": mixed_precision, } if torch.cuda.is_available(): UpperCAmelCase = torch.cuda.device_count() UpperCAmelCase = num_gpus UpperCAmelCase = False if num_gpus > 1: UpperCAmelCase = """MULTI_GPU""" else: UpperCAmelCase = """NO""" elif is_xpu_available() and use_xpu: UpperCAmelCase = torch.xpu.device_count() UpperCAmelCase = num_xpus UpperCAmelCase = False if num_xpus > 1: UpperCAmelCase = """MULTI_XPU""" else: UpperCAmelCase = """NO""" elif is_npu_available(): UpperCAmelCase = torch.npu.device_count() UpperCAmelCase = num_npus UpperCAmelCase = False if num_npus > 1: UpperCAmelCase = """MULTI_NPU""" else: UpperCAmelCase = """NO""" else: UpperCAmelCase = 0 UpperCAmelCase = True UpperCAmelCase = 1 UpperCAmelCase = """NO""" UpperCAmelCase = ClusterConfig(**_snake_case ) config.to_json_file(_snake_case ) return path def _a ( _snake_case , _snake_case ): """simple docstring""" UpperCAmelCase = parser.add_parser("""default""" , parents=_snake_case , 
help=_snake_case , formatter_class=_snake_case ) parser.add_argument( """--config_file""" , default=_snake_case , help=( """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """ """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """ """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """ """with 'huggingface'.""" ) , dest="""save_location""" , ) parser.add_argument( """--mixed_precision""" , choices=["""no""", """fp16""", """bf16"""] , type=_snake_case , help="""Whether or not to use mixed precision training. """ """Choose between FP16 and BF16 (bfloat16) training. """ """BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" , default="""no""" , ) parser.set_defaults(func=_snake_case ) return parser def _a ( _snake_case ): """simple docstring""" UpperCAmelCase = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(F'''accelerate configuration saved at {config_file}''' )
717
"""simple docstring""" def _a ( _snake_case = 10 , _snake_case = 22 ): """simple docstring""" UpperCAmelCase = range(1 , _snake_case ) UpperCAmelCase = range(1 , _snake_case ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(F"""{solution(10, 22) = }""")
74
0
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class lowerCamelCase__ ( unittest.TestCase ): def __init__( self ,A ,A=7 ,A=3 ,A=18 ,A=30 ,A=400 ,A=True ,A=None ,A=True ,A=None ,): UpperCAmelCase = size if size is not None else {"""shortest_edge""": 20} UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = num_channels UpperCAmelCase = image_size UpperCAmelCase = min_resolution UpperCAmelCase = max_resolution UpperCAmelCase = do_resize UpperCAmelCase = size UpperCAmelCase = do_center_crop UpperCAmelCase = crop_size def _UpperCamelCase ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class lowerCamelCase__ ( snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE = MobileNetVaImageProcessor if is_vision_available() else None def _UpperCamelCase ( self ): UpperCAmelCase = MobileNetVaImageProcessingTester(self ) @property def _UpperCamelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def _UpperCamelCase ( self ): UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A ,"""do_resize""" ) ) self.assertTrue(hasattr(A ,"""size""" ) ) self.assertTrue(hasattr(A ,"""do_center_crop""" ) ) self.assertTrue(hasattr(A ,"""crop_size""" ) ) def _UpperCamelCase ( self ): UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size 
,{"""shortest_edge""": 20} ) self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} ) UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} ) def _UpperCamelCase ( self ): pass def _UpperCamelCase ( self ): # Initialize image_processing UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A ,Image.Image ) # Test not batched input UpperCAmelCase = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched UpperCAmelCase = image_processing(A ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def _UpperCamelCase ( self ): # Initialize image_processing UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A ) for image in image_inputs: self.assertIsInstance(A ,np.ndarray ) # Test not batched input UpperCAmelCase = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # 
Test batched UpperCAmelCase = image_processing(A ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def _UpperCamelCase ( self ): # Initialize image_processing UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A ) for image in image_inputs: self.assertIsInstance(A ,torch.Tensor ) # Test not batched input UpperCAmelCase = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched UpperCAmelCase = image_processing(A ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,)
718
"""simple docstring""" from __future__ import annotations def _a ( _snake_case ): """simple docstring""" return len(set(_snake_case ) ) == len(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
74
0
"""simple docstring""" from __future__ import annotations from statistics import mean def _a ( _snake_case , _snake_case , _snake_case ): """simple docstring""" UpperCAmelCase = [0] * no_of_processes UpperCAmelCase = [0] * no_of_processes # Initialize remaining_time to waiting_time. for i in range(_snake_case ): UpperCAmelCase = burst_time[i] UpperCAmelCase = [] UpperCAmelCase = 0 UpperCAmelCase = 0 # When processes are not completed, # A process whose arrival time has passed \ # and has remaining execution time is put into the ready_process. # The shortest process in the ready_process, target_process is executed. while completed != no_of_processes: UpperCAmelCase = [] UpperCAmelCase = -1 for i in range(_snake_case ): if (arrival_time[i] <= total_time) and (remaining_time[i] > 0): ready_process.append(_snake_case ) if len(_snake_case ) > 0: UpperCAmelCase = ready_process[0] for i in ready_process: if remaining_time[i] < remaining_time[target_process]: UpperCAmelCase = i total_time += burst_time[target_process] completed += 1 UpperCAmelCase = 0 UpperCAmelCase = ( total_time - arrival_time[target_process] - burst_time[target_process] ) else: total_time += 1 return waiting_time def _a ( _snake_case , _snake_case , _snake_case ): """simple docstring""" UpperCAmelCase = [0] * no_of_processes for i in range(_snake_case ): UpperCAmelCase = burst_time[i] + waiting_time[i] return turn_around_time if __name__ == "__main__": print("""[TEST CASE 01]""") _UpperCamelCase = 4 _UpperCamelCase = [2, 5, 3, 7] _UpperCamelCase = [0, 0, 0, 0] _UpperCamelCase = calculate_waitingtime(arrival_time, burst_time, no_of_processes) _UpperCamelCase = calculate_turnaroundtime( burst_time, no_of_processes, waiting_time ) # Printing the Result print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""") for i, process_id in enumerate(list(range(1, 5))): print( F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t""" F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}""" ) 
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""") print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
719
"""simple docstring""" import math def _a ( _snake_case ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_snake_case ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _a ( _snake_case = 0.1 ): """simple docstring""" UpperCAmelCase = 3 UpperCAmelCase = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ): primes += is_prime(_snake_case ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
74
0
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowerCamelCase__ ( snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE = ShapEPipeline SCREAMING_SNAKE_CASE = ['''prompt'''] SCREAMING_SNAKE_CASE = ['''prompt'''] SCREAMING_SNAKE_CASE = [ '''num_images_per_prompt''', '''num_inference_steps''', '''generator''', '''latents''', '''guidance_scale''', '''frame_size''', '''output_type''', '''return_dict''', ] SCREAMING_SNAKE_CASE = False @property def _UpperCamelCase ( self ): return 32 @property def _UpperCamelCase ( self ): return 32 @property def _UpperCamelCase ( self ): return self.time_input_dim * 4 @property def _UpperCamelCase ( self ): return 8 @property def _UpperCamelCase ( self ): UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def _UpperCamelCase ( self ): torch.manual_seed(0 ) UpperCAmelCase = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,) return CLIPTextModelWithProjection(A ) @property def _UpperCamelCase ( self ): torch.manual_seed(0 ) UpperCAmelCase = { """num_attention_heads""": 2, """attention_head_dim""": 16, """embedding_dim""": self.time_input_dim, """num_embeddings""": 32, """embedding_proj_dim""": self.text_embedder_hidden_size, """time_embed_dim""": self.time_embed_dim, """num_layers""": 1, """clip_embed_dim""": 
self.time_input_dim * 2, """additional_embeddings""": 0, """time_embed_act_fn""": """gelu""", """norm_in_type""": """layer""", """encoder_hid_proj_type""": None, """added_emb_type""": None, } UpperCAmelCase = PriorTransformer(**A ) return model @property def _UpperCamelCase ( self ): torch.manual_seed(0 ) UpperCAmelCase = { """param_shapes""": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), """d_latent""": self.time_input_dim, """d_hidden""": self.renderer_dim, """n_output""": 12, """background""": ( 0.1, 0.1, 0.1, ), } UpperCAmelCase = ShapERenderer(**A ) return model def _UpperCamelCase ( self ): UpperCAmelCase = self.dummy_prior UpperCAmelCase = self.dummy_text_encoder UpperCAmelCase = self.dummy_tokenizer UpperCAmelCase = self.dummy_renderer UpperCAmelCase = HeunDiscreteScheduler( beta_schedule="""exp""" ,num_train_timesteps=1_024 ,prediction_type="""sample""" ,use_karras_sigmas=A ,clip_sample=A ,clip_sample_range=1.0 ,) UpperCAmelCase = { """prior""": prior, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """renderer""": renderer, """scheduler""": scheduler, } return components def _UpperCamelCase ( self ,A ,A=0 ): if str(A ).startswith("""mps""" ): UpperCAmelCase = torch.manual_seed(A ) else: UpperCAmelCase = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase = { """prompt""": """horse""", """generator""": generator, """num_inference_steps""": 1, """frame_size""": 32, """output_type""": """np""", } return inputs def _UpperCamelCase ( self ): UpperCAmelCase = """cpu""" UpperCAmelCase = self.get_dummy_components() UpperCAmelCase = self.pipeline_class(**A ) UpperCAmelCase = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase = pipe(**self.get_dummy_inputs(A ) ) UpperCAmelCase = output.images[0] UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) UpperCAmelCase = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 
0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _UpperCamelCase ( self ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def _UpperCamelCase ( self ): UpperCAmelCase = torch_device == """cpu""" UpperCAmelCase = True self._test_inference_batch_single_identical( batch_size=2 ,test_max_difference=A ,relax_max_difference=A ,) def _UpperCamelCase ( self ): UpperCAmelCase = self.get_dummy_components() UpperCAmelCase = self.pipeline_class(**A ) UpperCAmelCase = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase = 1 UpperCAmelCase = 2 UpperCAmelCase = self.get_dummy_inputs(A ) for key in inputs.keys(): if key in self.batch_params: UpperCAmelCase = batch_size * [inputs[key]] UpperCAmelCase = pipe(**A ,num_images_per_prompt=A )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): def _UpperCamelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCamelCase ( self ): UpperCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/test_shap_e_np_out.npy""" ) UpperCAmelCase = ShapEPipeline.from_pretrained("""openai/shap-e""" ) UpperCAmelCase = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase = torch.Generator(device=A ).manual_seed(0 ) UpperCAmelCase = pipe( """a shark""" ,generator=A ,guidance_scale=15.0 ,num_inference_steps=64 ,frame_size=64 ,output_type="""np""" ,).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(A ,A )
720
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = ['''image_processor''', '''tokenizer'''] SCREAMING_SNAKE_CASE = '''CLIPImageProcessor''' SCREAMING_SNAKE_CASE = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self ,A=None ,A=None ,**A ): UpperCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" ,A ,) UpperCAmelCase = kwargs.pop("""feature_extractor""" ) UpperCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(A ,A ) def __call__( self ,A=None ,A=None ,A=None ,**A ): if text is None and images is None: raise ValueError("""You have to specify either text or images. 
Both cannot be none.""" ) if text is not None: UpperCAmelCase = self.tokenizer(A ,return_tensors=A ,**A ) if images is not None: UpperCAmelCase = self.image_processor(A ,return_tensors=A ,**A ) if text is not None and images is not None: UpperCAmelCase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**A ) ,tensor_type=A ) def _UpperCamelCase ( self ,*A ,**A ): return self.tokenizer.batch_decode(*A ,**A ) def _UpperCamelCase ( self ,*A ,**A ): return self.tokenizer.decode(*A ,**A ) @property def _UpperCamelCase ( self ): UpperCAmelCase = self.tokenizer.model_input_names UpperCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _UpperCamelCase ( self ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,A ,) return self.image_processor_class @property def _UpperCamelCase ( self ): warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,A ,) return self.image_processor
74
0
"""simple docstring""" from __future__ import annotations from typing import Generic, TypeVar _UpperCamelCase = TypeVar("""T""") class lowerCamelCase__ ( Generic[T] ): def __init__( self ,A ): UpperCAmelCase = data UpperCAmelCase = self UpperCAmelCase = 0 class lowerCamelCase__ ( Generic[T] ): def __init__( self ): # map from node name to the node object UpperCAmelCase = {} def _UpperCamelCase ( self ,A ): # create a new set with x as its member UpperCAmelCase = DisjointSetTreeNode(A ) def _UpperCamelCase ( self ,A ): # find the set x belongs to (with path-compression) UpperCAmelCase = self.map[data] if elem_ref != elem_ref.parent: UpperCAmelCase = self.find_set(elem_ref.parent.data ) return elem_ref.parent def _UpperCamelCase ( self ,A ,A ): # helper function for union operation if nodea.rank > nodea.rank: UpperCAmelCase = nodea else: UpperCAmelCase = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def _UpperCamelCase ( self ,A ,A ): # merge 2 disjoint sets self.link(self.find_set(A ) ,self.find_set(A ) ) class lowerCamelCase__ ( Generic[T] ): def __init__( self ): # connections: map from the node to the neighbouring nodes (with weights) UpperCAmelCase = {} def _UpperCamelCase ( self ,A ): # add a node ONLY if its not present in the graph if node not in self.connections: UpperCAmelCase = {} def _UpperCamelCase ( self ,A ,A ,A ): # add an edge with the given weight self.add_node(A ) self.add_node(A ) UpperCAmelCase = weight UpperCAmelCase = weight def _UpperCamelCase ( self ): UpperCAmelCase = [] UpperCAmelCase = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda A : x[2] ) # creating the disjoint set UpperCAmelCase = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(A ) # MST generation UpperCAmelCase = 0 UpperCAmelCase = 0 UpperCAmelCase = GraphUndirectedWeighted[T]() while num_edges < 
len(self.connections ) - 1: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = edges[index] index += 1 UpperCAmelCase = disjoint_set.find_set(A ) UpperCAmelCase = disjoint_set.find_set(A ) if parent_u != parent_v: num_edges += 1 graph.add_edge(A ,A ,A ) disjoint_set.union(A ,A ) return graph
721
"""simple docstring""" from __future__ import annotations from collections.abc import Generator import requests from bsa import BeautifulSoup _UpperCamelCase = """https://www.indeed.co.in/jobs?q=mobile+app+development&l=""" def _a ( _snake_case = "mumbai" ): """simple docstring""" UpperCAmelCase = BeautifulSoup(requests.get(url + location ).content , """html.parser""" ) # This attribute finds out all the specifics listed in a job for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ): UpperCAmelCase = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip() UpperCAmelCase = job.find("""span""" , {"""class""": """company"""} ).text.strip() yield job_title, company_name if __name__ == "__main__": for i, job in enumerate(fetch_jobs("""Bangalore"""), 1): print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
74
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _UpperCamelCase = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ["""FNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ["""FNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ """FNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """FNetForMaskedLM""", """FNetForMultipleChoice""", """FNetForNextSentencePrediction""", """FNetForPreTraining""", """FNetForQuestionAnswering""", """FNetForSequenceClassification""", """FNetForTokenClassification""", """FNetLayer""", """FNetModel""", """FNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys _UpperCamelCase = 
_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
700
"""simple docstring""" import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class lowerCamelCase__ ( unittest.TestCase ): def _UpperCamelCase ( self ): UpperCAmelCase = ["""a""", """b""", """c"""] # Defaults to last layer if both are None UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(A ,A ,A ) self.assertEqual(A ,["""c"""] ) self.assertEqual(A ,[2] ) # Out indices set to match out features UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(["""a""", """c"""] ,A ,A ) self.assertEqual(A ,["""a""", """c"""] ) self.assertEqual(A ,[0, 2] ) # Out features set to match out indices UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(A ,[0, 2] ,A ) self.assertEqual(A ,["""a""", """c"""] ) self.assertEqual(A ,[0, 2] ) # Out features selected from negative indices UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(A ,[-3, -1] ,A ) self.assertEqual(A ,["""a""", """c"""] ) self.assertEqual(A ,[-3, -1] ) def _UpperCamelCase ( self ): # Stage names must be set with self.assertRaises(A ): verify_out_features_out_indices(["""a""", """b"""] ,(0, 1) ,A ) # Out features must be a list with self.assertRaises(A ): verify_out_features_out_indices(("""a""", """b""") ,(0, 1) ,["""a""", """b"""] ) # Out features must be a subset of stage names with self.assertRaises(A ): verify_out_features_out_indices(["""a""", """b"""] ,(0, 1) ,["""a"""] ) # Out indices must be a list or tuple with self.assertRaises(A ): verify_out_features_out_indices(A ,0 ,["""a""", """b"""] ) # Out indices must be a subset of stage names with self.assertRaises(A ): verify_out_features_out_indices(A ,(0, 1) ,["""a"""] ) # Out features and out indices must be the same length with self.assertRaises(A ): verify_out_features_out_indices(["""a""", """b"""] ,(0,) ,["""a""", """b""", """c"""] ) # Out features should 
match out indices with self.assertRaises(A ): verify_out_features_out_indices(["""a""", """b"""] ,(0, 2) ,["""a""", """b""", """c"""] ) # Out features and out indices should be in order with self.assertRaises(A ): verify_out_features_out_indices(["""b""", """a"""] ,(0, 1) ,["""a""", """b"""] ) # Check passes with valid inputs verify_out_features_out_indices(["""a""", """b""", """d"""] ,(0, 1, -1) ,["""a""", """b""", """c""", """d"""] ) def _UpperCamelCase ( self ): UpperCAmelCase = BackboneMixin() UpperCAmelCase = ["""a""", """b""", """c"""] UpperCAmelCase = ["""a""", """c"""] UpperCAmelCase = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features ,["""a""", """c"""] ) self.assertEqual(backbone.out_indices ,[0, 2] ) # Check out features and indices are updated correctly UpperCAmelCase = ["""a""", """b"""] self.assertEqual(backbone.out_features ,["""a""", """b"""] ) self.assertEqual(backbone.out_indices ,[0, 1] ) UpperCAmelCase = [-3, -1] self.assertEqual(backbone.out_features ,["""a""", """c"""] ) self.assertEqual(backbone.out_indices ,[-3, -1] )
74
0
"""simple docstring""" import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) _UpperCamelCase = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation="""relu""") ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation="""relu""")) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation="""relu""")) classifier.add(layers.Dense(units=1, activation="""sigmoid""")) # Compiling the CNN classifier.compile( optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') _UpperCamelCase = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) _UpperCamelCase = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) _UpperCamelCase = train_datagen.flow_from_directory( """dataset/training_set""", target_size=(64, 64), batch_size=32, class_mode="""binary""" ) _UpperCamelCase = test_datagen.flow_from_directory( """dataset/test_set""", target_size=(64, 64), batch_size=32, class_mode="""binary""" ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save("""cnn.h5""") # Part 3 - Making new predictions _UpperCamelCase = tf.keras.preprocessing.image.load_img( 
"""dataset/single_prediction/image.png""", target_size=(64, 64) ) _UpperCamelCase = tf.keras.preprocessing.image.img_to_array(test_image) _UpperCamelCase = np.expand_dims(test_image, axis=0) _UpperCamelCase = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: _UpperCamelCase = """Normal""" if result[0][0] == 1: _UpperCamelCase = """Abnormality detected"""
701
"""simple docstring""" from __future__ import annotations from typing import Any class lowerCamelCase__ : def __init__( self ,A = 6 ): UpperCAmelCase = None UpperCAmelCase = None self.create_linked_list(A ) def _UpperCamelCase ( self ,A ): UpperCAmelCase = Node() UpperCAmelCase = current_node UpperCAmelCase = current_node UpperCAmelCase = current_node for _ in range(1 ,A ): UpperCAmelCase = Node() UpperCAmelCase = current_node UpperCAmelCase = previous_node UpperCAmelCase = current_node UpperCAmelCase = self.front UpperCAmelCase = previous_node def _UpperCamelCase ( self ): return ( self.front == self.rear and self.front is not None and self.front.data is None ) def _UpperCamelCase ( self ): self.check_can_perform_operation() return self.front.data if self.front else None def _UpperCamelCase ( self ,A ): if self.rear is None: return self.check_is_full() if not self.is_empty(): UpperCAmelCase = self.rear.next if self.rear: UpperCAmelCase = data def _UpperCamelCase ( self ): self.check_can_perform_operation() if self.rear is None or self.front is None: return None if self.front == self.rear: UpperCAmelCase = self.front.data UpperCAmelCase = None return data UpperCAmelCase = self.front UpperCAmelCase = old_front.next UpperCAmelCase = old_front.data UpperCAmelCase = None return data def _UpperCamelCase ( self ): if self.is_empty(): raise Exception("""Empty Queue""" ) def _UpperCamelCase ( self ): if self.rear and self.rear.next == self.front: raise Exception("""Full Queue""" ) class lowerCamelCase__ : def __init__( self ): UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = None if __name__ == "__main__": import doctest doctest.testmod()
74
0
"""simple docstring""" import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _UpperCamelCase = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class lowerCamelCase__ ( snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE = DebertaVaTokenizer SCREAMING_SNAKE_CASE = DebertaVaTokenizerFast SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = True def _UpperCamelCase ( self ): super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase = DebertaVaTokenizer(A ,unk_token="""<unk>""" ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCamelCase ( self ,A ): UpperCAmelCase = """this is a test""" UpperCAmelCase = """this is a test""" return input_text, output_text def _UpperCamelCase ( self ): UpperCAmelCase = """<pad>""" UpperCAmelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) ,A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) ,A ) def _UpperCamelCase ( self ): UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""<pad>""" ) self.assertEqual(vocab_keys[1] ,"""<unk>""" ) self.assertEqual(vocab_keys[-1] ,"""[PAD]""" ) self.assertEqual(len(A ) ,30_001 ) def _UpperCamelCase ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,30_000 ) def _UpperCamelCase ( self ): # fmt: off UpperCAmelCase = """ \tHeLLo!how \n Are yoU? 
""" UpperCAmelCase = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""] # fmt: on UpperCAmelCase = DebertaVaTokenizer(A ,do_lower_case=A ) UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A ,add_special_tokens=A ) ) self.assertListEqual(A ,A ) UpperCAmelCase = DebertaVaTokenizerFast(A ,do_lower_case=A ) UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A ,add_special_tokens=A ) ) self.assertListEqual(A ,A ) @unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" ) def _UpperCamelCase ( self ): pass @unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" ) def _UpperCamelCase ( self ): pass def _UpperCamelCase ( self ): # fmt: off UpperCAmelCase = """I was born in 92000, and this is falsé.""" UpperCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ] # fmt: on UpperCAmelCase = DebertaVaTokenizer(A ,split_by_punct=A ) UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A ,add_special_tokens=A ) ) self.assertListEqual(A ,A ) UpperCAmelCase = DebertaVaTokenizerFast(A ,split_by_punct=A ) UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A ,add_special_tokens=A ) ) self.assertListEqual(A ,A ) def _UpperCamelCase ( self ): # fmt: off UpperCAmelCase = """I was born in 92000, and this is falsé.""" UpperCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ] # fmt: on UpperCAmelCase = DebertaVaTokenizer(A ,do_lower_case=A ,split_by_punct=A ) UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A ,add_special_tokens=A ) ) self.assertListEqual(A ,A ) UpperCAmelCase = 
DebertaVaTokenizerFast(A ,do_lower_case=A ,split_by_punct=A ) UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A ,add_special_tokens=A ) ) self.assertListEqual(A ,A ) def _UpperCamelCase ( self ): # fmt: off UpperCAmelCase = """I was born in 92000, and this is falsé.""" UpperCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ] # fmt: on UpperCAmelCase = DebertaVaTokenizer(A ,do_lower_case=A ,split_by_punct=A ) UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A ,add_special_tokens=A ) ) self.assertListEqual(A ,A ) UpperCAmelCase = DebertaVaTokenizerFast(A ,do_lower_case=A ,split_by_punct=A ) UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A ,add_special_tokens=A ) ) self.assertListEqual(A ,A ) def _UpperCamelCase ( self ): # fmt: off UpperCAmelCase = """I was born in 92000, and this is falsé.""" UpperCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ] # fmt: on UpperCAmelCase = DebertaVaTokenizer(A ,do_lower_case=A ,split_by_punct=A ) UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A ,add_special_tokens=A ) ) self.assertListEqual(A ,A ) UpperCAmelCase = DebertaVaTokenizerFast(A ,do_lower_case=A ,split_by_punct=A ) UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A ,add_special_tokens=A ) ) self.assertListEqual(A ,A ) def _UpperCamelCase ( self ): # fmt: off UpperCAmelCase = """ \tHeLLo!how \n Are yoU? 
""" UpperCAmelCase = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""] # fmt: on UpperCAmelCase = DebertaVaTokenizer(A ,do_lower_case=A ,split_by_punct=A ) UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A ,add_special_tokens=A ) ) self.assertListEqual(A ,A ) UpperCAmelCase = DebertaVaTokenizerFast(A ,do_lower_case=A ,split_by_punct=A ) UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A ,add_special_tokens=A ) ) self.assertListEqual(A ,A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_rust_tokenizer() UpperCAmelCase = """I was born in 92000, and this is falsé.""" UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A ,add_special_tokens=A ) ) UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A ,add_special_tokens=A ) ) self.assertListEqual(A ,A ) UpperCAmelCase = tokenizer.encode(A ,add_special_tokens=A ) UpperCAmelCase = rust_tokenizer.encode(A ,add_special_tokens=A ) self.assertListEqual(A ,A ) UpperCAmelCase = self.get_rust_tokenizer() UpperCAmelCase = tokenizer.encode(A ) UpperCAmelCase = rust_tokenizer.encode(A ) self.assertListEqual(A ,A ) def _UpperCamelCase ( self ): UpperCAmelCase = """This is a test""" UpperCAmelCase = [13, 1, 4_398, 25, 21, 1_289] UpperCAmelCase = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""] UpperCAmelCase = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""] UpperCAmelCase = DebertaVaTokenizer(A ,keep_accents=A ) UpperCAmelCase = DebertaVaTokenizerFast(A ,keep_accents=A ) UpperCAmelCase = tokenizer.encode(A ,add_special_tokens=A ) self.assertListEqual(A ,A ) UpperCAmelCase = tokenizer.tokenize(A ) self.assertListEqual(A ,A ) UpperCAmelCase = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual(A ,A ) UpperCAmelCase = rust_tokenizer.encode(A ,add_special_tokens=A ) 
self.assertListEqual(A ,A ) UpperCAmelCase = rust_tokenizer.tokenize(A ) self.assertListEqual(A ,A ) UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(A ) self.assertListEqual(A ,A ) # fmt: off UpperCAmelCase = """I was born in 92000, and this is falsé.""" UpperCAmelCase = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] UpperCAmelCase = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ] UpperCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ] # fmt: on UpperCAmelCase = tokenizer.encode(A ,add_special_tokens=A ) self.assertListEqual(A ,A ) UpperCAmelCase = tokenizer.tokenize(A ) self.assertListEqual(A ,A ) UpperCAmelCase = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual(A ,A ) UpperCAmelCase = rust_tokenizer.encode(A ,add_special_tokens=A ) self.assertListEqual(A ,A ) UpperCAmelCase = rust_tokenizer.tokenize(A ) self.assertListEqual(A ,A ) UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(A ) self.assertListEqual(A ,A ) def _UpperCamelCase ( self ): UpperCAmelCase = DebertaVaTokenizer(A ) UpperCAmelCase = tokenizer.encode("""sequence builders""" ) UpperCAmelCase = tokenizer.encode("""multi-sequence build""" ) UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(A ,A ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] ,A ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] ,A ,) @slow def _UpperCamelCase ( self ): # fmt: off UpperCAmelCase = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 
36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A ,model_name="""microsoft/deberta-v2-xlarge""" ,revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" ,)
702
"""simple docstring""" import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() _UpperCamelCase = logging.get_logger("""transformers.models.speecht5""") def _a ( _snake_case , _snake_case , _snake_case ): """simple docstring""" hf_model.apply_weight_norm() UpperCAmelCase = checkpoint["""input_conv.weight_g"""] UpperCAmelCase = checkpoint["""input_conv.weight_v"""] UpperCAmelCase = checkpoint["""input_conv.bias"""] for i in range(len(config.upsample_rates ) ): UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.weight_g'''] UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.weight_v'''] UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.bias'''] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g'''] UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v'''] UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias'''] UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g'''] UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v'''] UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias'''] UpperCAmelCase = checkpoint["""output_conv.1.weight_g"""] UpperCAmelCase = checkpoint["""output_conv.1.weight_v"""] UpperCAmelCase = checkpoint["""output_conv.1.bias"""] hf_model.remove_weight_norm() @torch.no_grad() def _a ( _snake_case , _snake_case , _snake_case , _snake_case=None , _snake_case=None , ): """simple docstring""" if config_path is not None: UpperCAmelCase = SpeechTaHifiGanConfig.from_pretrained(_snake_case ) else: UpperCAmelCase = SpeechTaHifiGanConfig() UpperCAmelCase = SpeechTaHifiGan(_snake_case ) UpperCAmelCase = torch.load(_snake_case ) load_weights(orig_checkpoint["""model"""]["""generator"""] , _snake_case , _snake_case ) UpperCAmelCase = np.load(_snake_case ) UpperCAmelCase = 
stats[0].reshape(-1 ) UpperCAmelCase = stats[1].reshape(-1 ) UpperCAmelCase = torch.from_numpy(_snake_case ).float() UpperCAmelCase = torch.from_numpy(_snake_case ).float() model.save_pretrained(_snake_case ) if repo_id: print("""Pushing to the hub...""" ) model.push_to_hub(_snake_case ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""") parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) _UpperCamelCase = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
74
0
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
703
"""simple docstring""" # Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position _UpperCamelCase = """2.13.1""" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("""3.7"""): raise ImportWarning( """To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition.""" ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( """To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n""" """If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.""" ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # 
isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip _UpperCamelCase = concatenate_datasets _UpperCamelCase = DownloadConfig _UpperCamelCase = DownloadManager _UpperCamelCase = DownloadMode _UpperCamelCase = DownloadConfig _UpperCamelCase = DownloadMode _UpperCamelCase = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
74
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { """hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""", # See all YOLOS models at https://huggingface.co/models?filter=yolos } class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = '''yolos''' def __init__( self ,A=768 ,A=12 ,A=12 ,A=3_072 ,A="gelu" ,A=0.0 ,A=0.0 ,A=0.02 ,A=1e-1_2 ,A=[512, 864] ,A=16 ,A=3 ,A=True ,A=100 ,A=True ,A=False ,A=1 ,A=5 ,A=2 ,A=5 ,A=2 ,A=0.1 ,**A ,): super().__init__(**A ) UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = initializer_range UpperCAmelCase = layer_norm_eps UpperCAmelCase = image_size UpperCAmelCase = patch_size UpperCAmelCase = num_channels UpperCAmelCase = qkv_bias UpperCAmelCase = num_detection_tokens UpperCAmelCase = use_mid_position_embeddings UpperCAmelCase = auxiliary_loss # Hungarian matcher UpperCAmelCase = class_cost UpperCAmelCase = bbox_cost UpperCAmelCase = giou_cost # Loss coefficients UpperCAmelCase = bbox_loss_coefficient UpperCAmelCase = giou_loss_coefficient UpperCAmelCase = eos_coefficient class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = version.parse('''1.11''' ) @property def _UpperCamelCase ( self ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def _UpperCamelCase ( self ): return 1e-4 @property def _UpperCamelCase ( self ): return 12
704
"""simple docstring""" def _a ( _snake_case ): """simple docstring""" if not isinstance(_snake_case , _snake_case ): raise ValueError("""Input must be an integer""" ) if input_num <= 0: raise ValueError("""Input must be positive""" ) return sum( divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
74
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

_UpperCamelCase = {
    """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
    """processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
    """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}

# Optional components: each block probes for its backend and, when present,
# rebinds the module-level name with the exported symbols.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase = ["""LayoutLMv2TokenizerFast"""]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase = ["""LayoutLMv2FeatureExtractor"""]
    _UpperCamelCase = ["""LayoutLMv2ImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase = [
        """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """LayoutLMv2ForQuestionAnswering""",
        """LayoutLMv2ForSequenceClassification""",
        """LayoutLMv2ForTokenClassification""",
        """LayoutLMv2Layer""",
        """LayoutLMv2Model""",
        """LayoutLMv2PreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
    from .processing_layoutlmva import LayoutLMvaProcessor
    from .tokenization_layoutlmva import LayoutLMvaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmva import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMvaForQuestionAnswering,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaLayer,
            LayoutLMvaModel,
            LayoutLMvaPreTrainedModel,
        )

else:
    import sys

    # NOTE(review): `_import_structure` is never defined in this module — the
    # dict above was rebound to `_UpperCamelCase` — so this line raises
    # NameError at import time; preserved verbatim (behavior-identical rewrite).
    _UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
705
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version _UpperCamelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") _UpperCamelCase = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) _UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCamelCase__ : SCREAMING_SNAKE_CASE = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''The column name of the images in the files. 
If not set, will try to use \'image\' or \'img\'.'''} , ) SCREAMING_SNAKE_CASE = field(default=snake_case , metadata={'''help''': '''A folder containing the training data.'''} ) SCREAMING_SNAKE_CASE = field(default=snake_case , metadata={'''help''': '''A folder containing the validation data.'''} ) SCREAMING_SNAKE_CASE = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) SCREAMING_SNAKE_CASE = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) SCREAMING_SNAKE_CASE = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def _UpperCamelCase ( self ): UpperCAmelCase = {} if self.train_dir is not None: UpperCAmelCase = self.train_dir if self.validation_dir is not None: UpperCAmelCase = self.validation_dir UpperCAmelCase = data_files if data_files else None @dataclass class lowerCamelCase__ : SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case )} , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) SCREAMING_SNAKE_CASE = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) SCREAMING_SNAKE_CASE = field(default=snake_case , metadata={'''help''': '''Name or path of preprocessor config.'''} ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={ '''help''': ( '''The size (resolution) of each patch. 
If not specified, will use `patch_size` of the configuration.''' ) } , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class lowerCamelCase__ : def __init__( self ,A=192 ,A=32 ,A=4 ,A=0.6 ): UpperCAmelCase = input_size UpperCAmelCase = mask_patch_size UpperCAmelCase = model_patch_size UpperCAmelCase = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError("""Input size must be divisible by mask patch size""" ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError("""Mask patch size must be divisible by model patch size""" ) UpperCAmelCase = self.input_size // self.mask_patch_size UpperCAmelCase = self.mask_patch_size // self.model_patch_size UpperCAmelCase = self.rand_size**2 UpperCAmelCase = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self ): UpperCAmelCase = np.random.permutation(self.token_count )[: self.mask_count] UpperCAmelCase = np.zeros(self.token_count ,dtype=A ) UpperCAmelCase = 1 UpperCAmelCase = mask.reshape((self.rand_size, self.rand_size) ) UpperCAmelCase = mask.repeat(self.scale ,axis=0 ).repeat(self.scale ,axis=1 ) return torch.tensor(mask.flatten() ) def _a ( _snake_case ): """simple docstring""" UpperCAmelCase = torch.stack([example["""pixel_values"""] for example in examples] ) UpperCAmelCase = torch.stack([example["""mask"""] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def _a ( ): """simple docstring""" UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mim""" , _snake_case , _snake_case ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(_snake_case ) transformers.utils.logging.set_verbosity(_snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. 
''' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. UpperCAmelCase = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _snake_case ) and data_args.train_val_split > 0.0: UpperCAmelCase = ds["""train"""].train_test_split(data_args.train_val_split ) UpperCAmelCase = split["""train"""] UpperCAmelCase = split["""test"""] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
UpperCAmelCase = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name_or_path: UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name_or_path , **_snake_case ) elif model_args.model_name_or_path: UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(_snake_case , """decoder_type""" ): UpperCAmelCase = """simmim""" # adapt config UpperCAmelCase = model_args.image_size if model_args.image_size is not None else config.image_size UpperCAmelCase = model_args.patch_size if model_args.patch_size is not None else config.patch_size UpperCAmelCase = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { """image_size""": model_args.image_size, """patch_size""": model_args.patch_size, """encoder_stride""": model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **_snake_case ) elif model_args.model_name_or_path: UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: UpperCAmelCase = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } UpperCAmelCase = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: UpperCAmelCase = 
AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) UpperCAmelCase = AutoModelForMaskedImageModeling.from_config(_snake_case ) if training_args.do_train: UpperCAmelCase = ds["""train"""].column_names else: UpperCAmelCase = ds["""validation"""].column_names if data_args.image_column_name is not None: UpperCAmelCase = data_args.image_column_name elif "image" in column_names: UpperCAmelCase = """image""" elif "img" in column_names: UpperCAmelCase = """img""" else: UpperCAmelCase = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py UpperCAmelCase = Compose( [ Lambda(lambda _snake_case : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator UpperCAmelCase = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(_snake_case ): UpperCAmelCase = [transforms(_snake_case ) for image in examples[image_column_name]] UpperCAmelCase = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: UpperCAmelCase = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms 
ds["train"].set_transform(_snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: UpperCAmelCase = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_snake_case ) # Initialize our trainer UpperCAmelCase = Trainer( model=_snake_case , args=_snake_case , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , ) # Training if training_args.do_train: UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCAmelCase = last_checkpoint UpperCAmelCase = trainer.train(resume_from_checkpoint=_snake_case ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: UpperCAmelCase = trainer.evaluate() trainer.log_metrics("""eval""" , _snake_case ) trainer.save_metrics("""eval""" , _snake_case ) # Write model card and (optionally) push to hub UpperCAmelCase = { """finetuned_from""": model_args.model_name_or_path, """tasks""": """masked-image-modeling""", """dataset""": data_args.dataset_name, """tags""": ["""masked-image-modeling"""], } if training_args.push_to_hub: trainer.push_to_hub(**_snake_case ) else: trainer.create_model_card(**_snake_case ) if __name__ == "__main__": main()
74
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _UpperCamelCase = { """configuration_efficientformer""": [ """EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EfficientFormerConfig""", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ["""EfficientFormerImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ """EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """EfficientFormerForImageClassification""", """EfficientFormerForImageClassificationWithTeacher""", """EfficientFormerModel""", """EfficientFormerPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ """TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFEfficientFormerForImageClassification""", """TFEfficientFormerForImageClassificationWithTeacher""", """TFEfficientFormerModel""", """TFEfficientFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
706
"""simple docstring""" import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""): _UpperCamelCase = True from torch.cuda.amp import autocast _UpperCamelCase = logging.getLogger(__name__) @dataclass class lowerCamelCase__ : SCREAMING_SNAKE_CASE = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''Whether to log verbose messages or not.'''} , ) SCREAMING_SNAKE_CASE = field( default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} ) SCREAMING_SNAKE_CASE = field( default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} ) SCREAMING_SNAKE_CASE = field( default=0.99_99_95 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} ) def _a ( _snake_case , _snake_case ): """simple docstring""" logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) UpperCAmelCase = logging.WARNING if 
model_args.verbose_logging: UpperCAmelCase = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank ): UpperCAmelCase = logging.INFO logger.setLevel(_snake_case ) @dataclass class lowerCamelCase__ : SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) SCREAMING_SNAKE_CASE = field( default='''train''' , metadata={ '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\'''' } , ) SCREAMING_SNAKE_CASE = field( default='''validation''' , metadata={ '''help''': ( '''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\'''' ) } , ) SCREAMING_SNAKE_CASE = field( default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. 
Defaults to \'file\''''} , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) SCREAMING_SNAKE_CASE = field( default=1 , metadata={ '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split''' } , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) SCREAMING_SNAKE_CASE = field( default=20.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} ) @dataclass class lowerCamelCase__ : SCREAMING_SNAKE_CASE = 42 SCREAMING_SNAKE_CASE = 42 SCREAMING_SNAKE_CASE = "longest" SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None def __call__( self ,A ): # reformat list to dict and set to pytorch format UpperCAmelCase = self.feature_extractor.pad( A ,max_length=self.max_length ,padding=self.padding ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors="""pt""" ,) UpperCAmelCase = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1] ) UpperCAmelCase = batch["""input_values"""].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula UpperCAmelCase = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1 ) ).to( torch.long ) UpperCAmelCase = torch.zeros( (batch_size, mask_indices_seq_length) ,dtype=torch.long ,device=batch["""input_values"""].device ) # these two operations makes sure that all values # before the output lengths indices are attended to UpperCAmelCase = 1 UpperCAmelCase = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool() # sample randomly masked indices UpperCAmelCase = _compute_mask_indices( (batch_size, mask_indices_seq_length) ,self.model.config.mask_time_prob ,self.model.config.mask_time_length ,attention_mask=A 
,min_masks=2 ,) return batch class lowerCamelCase__ ( snake_case ): def __init__( self ,*A ,A=1 ,A=0 ,A=1.0 ,**A ): super().__init__(*A ,**A ) UpperCAmelCase = 0 UpperCAmelCase = max_gumbel_temp UpperCAmelCase = min_gumbel_temp UpperCAmelCase = gumbel_temp_decay def _UpperCamelCase ( self ,A ,A ): model.train() UpperCAmelCase = self._prepare_inputs(A ) if self.use_amp: with autocast(): UpperCAmelCase = self.compute_loss(A ,A ) else: UpperCAmelCase = self.compute_loss(A ,A ) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": UpperCAmelCase = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": UpperCAmelCase = loss.sum() / (inputs["""mask_time_indices"""]).sum() else: raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' ) if self.args.gradient_accumulation_steps > 1: UpperCAmelCase = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(A ).backward() elif self.use_apex: with amp.scale_loss(A ,self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(A ) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) ) return loss.detach() def _a ( ): """simple docstring""" UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses() configure_logger(_snake_case , _snake_case ) # Downloading and loading a dataset from the hub. 
UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" UpperCAmelCase = DatasetDict() UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , ) UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , ) else: # make sure only "validation" and "train" keys remain" UpperCAmelCase = DatasetDict() UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split="""validation""" , cache_dir=model_args.cache_dir , ) UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , ) # only normalized-inputs-training is supported UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_snake_case ) def prepare_dataset(_snake_case ): # check that all files have the correct sampling rate UpperCAmelCase , UpperCAmelCase = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate ) return batch # load audio files into numpy arrays UpperCAmelCase = datasets.map( _snake_case , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["""train"""].column_names ) # filter audio files that are too long UpperCAmelCase = vectorized_datasets.filter( lambda _snake_case : len(data["""speech"""] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) ) def normalize(_snake_case ): return feature_extractor(batch["""speech"""] , 
sampling_rate=feature_extractor.sampling_rate ) # normalize and transform to `BatchFeatures` UpperCAmelCase = vectorized_datasets.map( _snake_case , batched=_snake_case , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["""train"""].column_names , ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 UpperCAmelCase = WavaVecaConfig.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( """PreTraining is only supported for ``config.do_stable_layer_norm=True`` and""" """ ``config.feat_extract_norm='layer'""" ) UpperCAmelCase = WavaVecaForPreTraining(_snake_case ) UpperCAmelCase = DataCollatorForWavaVecaPretraining(model=_snake_case , feature_extractor=_snake_case ) UpperCAmelCase = WavaVecaPreTrainer( model=_snake_case , data_collator=_snake_case , args=_snake_case , train_dataset=vectorized_datasets["""train"""] , eval_dataset=vectorized_datasets["""validation"""] , tokenizer=_snake_case , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , ) trainer.train() if __name__ == "__main__": main()
74
0
"""simple docstring""" import os import sys import unittest _UpperCamelCase : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path _UpperCamelCase : Optional[Any] = os.path.join(git_repo_path, """src""", """diffusers""") class lowerCamelCase__ ( unittest.TestCase ): def _UpperCamelCase ( self ): UpperCAmelCase = find_backend(""" if not is_torch_available():""" ) self.assertEqual(A ,"""torch""" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") UpperCAmelCase = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" ) self.assertEqual(A ,"""torch_and_transformers""" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") UpperCAmelCase = find_backend( """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" ) self.assertEqual(A ,"""torch_and_transformers_and_onnx""" ) def _UpperCamelCase ( self ): UpperCAmelCase = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("""torch""" ,A ) self.assertIn("""torch_and_transformers""" ,A ) self.assertIn("""flax_and_transformers""" ,A ) self.assertIn("""torch_and_transformers_and_onnx""" ,A ) # Likewise, we can't assert on the exact content of a key self.assertIn("""UNet2DModel""" ,objects["""torch"""] ) self.assertIn("""FlaxUNet2DConditionModel""" ,objects["""flax"""] ) self.assertIn("""StableDiffusionPipeline""" 
,objects["""torch_and_transformers"""] ) self.assertIn("""FlaxStableDiffusionPipeline""" ,objects["""flax_and_transformers"""] ) self.assertIn("""LMSDiscreteScheduler""" ,objects["""torch_and_scipy"""] ) self.assertIn("""OnnxStableDiffusionPipeline""" ,objects["""torch_and_transformers_and_onnx"""] ) def _UpperCamelCase ( self ): UpperCAmelCase = create_dummy_object("""CONSTANT""" ,"""'torch'""" ) self.assertEqual(A ,"""\nCONSTANT = None\n""" ) UpperCAmelCase = create_dummy_object("""function""" ,"""'torch'""" ) self.assertEqual( A ,"""\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" ) UpperCAmelCase = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, 'torch') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, 'torch') """ UpperCAmelCase = create_dummy_object("""FakeClass""" ,"""'torch'""" ) self.assertEqual(A ,A ) def _UpperCamelCase ( self ): UpperCAmelCase = """# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) """ UpperCAmelCase = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} ) self.assertEqual(dummy_files["""torch"""] ,A )
707
"""simple docstring""" from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class lowerCamelCase__ : def __init__( self ,A ,): UpperCAmelCase = parent UpperCAmelCase = 13 UpperCAmelCase = 7 UpperCAmelCase = 30 UpperCAmelCase = self.seq_length + self.mem_len UpperCAmelCase = 15 UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = 99 UpperCAmelCase = [10, 50, 80] UpperCAmelCase = 32 UpperCAmelCase = 32 UpperCAmelCase = 4 UpperCAmelCase = 8 UpperCAmelCase = 128 UpperCAmelCase = 2 UpperCAmelCase = 2 UpperCAmelCase = None UpperCAmelCase = 1 UpperCAmelCase = 0 UpperCAmelCase = 3 UpperCAmelCase = self.vocab_size - 1 UpperCAmelCase = 0.01 def _UpperCamelCase ( self ): UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase = None if self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase = TransfoXLConfig( vocab_size=self.vocab_size ,mem_len=self.mem_len ,clamp_len=self.clamp_len ,cutoffs=self.cutoffs ,d_model=self.hidden_size ,d_embed=self.d_embed ,n_head=self.num_attention_heads ,d_head=self.d_head ,d_inner=self.d_inner ,div_val=self.div_val ,n_layer=self.num_hidden_layers ,eos_token_id=self.eos_token_id ,pad_token_id=self.vocab_size - 1 ,init_range=self.init_range ,num_labels=self.num_labels ,) return (config, input_ids_a, input_ids_a, lm_labels) def _UpperCamelCase ( self ): 
random.seed(self.seed ) tf.random.set_seed(self.seed ) def _UpperCamelCase ( self ,A ,A ,A ,A ): UpperCAmelCase = TFTransfoXLModel(A ) UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple() UpperCAmelCase = {"""input_ids""": input_ids_a, """mems""": mems_a} UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple() self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) def _UpperCamelCase ( self ,A ,A ,A ,A ): UpperCAmelCase = TFTransfoXLLMHeadModel(A ) UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple() UpperCAmelCase = {"""input_ids""": input_ids_a, """labels""": lm_labels} UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple() UpperCAmelCase , UpperCAmelCase = model([input_ids_a, mems_a] ).to_tuple() UpperCAmelCase = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels} UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple() self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) def _UpperCamelCase ( self ,A ,A ,A ,A ): UpperCAmelCase = TFTransfoXLForSequenceClassification(A ) UpperCAmelCase = model(A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _UpperCamelCase ( self 
): UpperCAmelCase = self.prepare_config_and_inputs() ((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) = config_and_inputs UpperCAmelCase = {"""input_ids""": input_ids_a} return config, inputs_dict @require_tf class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) SCREAMING_SNAKE_CASE = () if is_tf_available() else () SCREAMING_SNAKE_CASE = ( { '''feature-extraction''': TFTransfoXLModel, '''text-classification''': TFTransfoXLForSequenceClassification, '''text-generation''': TFTransfoXLLMHeadModel, '''zero-shot''': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def _UpperCamelCase ( self ,A ,A ,A ,A ,A ): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def _UpperCamelCase ( self ): UpperCAmelCase = TFTransfoXLModelTester(self ) UpperCAmelCase = ConfigTester(self ,config_class=A ,d_embed=37 ) def _UpperCamelCase ( self ): self.config_tester.run_common_tests() def _UpperCamelCase ( self ): self.model_tester.set_seed() UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*A ) def _UpperCamelCase ( self ): self.model_tester.set_seed() UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*A ) def _UpperCamelCase ( self ): UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: UpperCAmelCase = model_class(A ) assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: UpperCAmelCase = model.get_output_embeddings() assert isinstance(A ,tf.keras.layers.Layer ) UpperCAmelCase = model.get_bias() assert name is None else: UpperCAmelCase = model.get_output_embeddings() assert x is None UpperCAmelCase = model.get_bias() assert name is None def _UpperCamelCase ( self ): # TODO JP: Make TransfoXL XLA compliant pass @slow def _UpperCamelCase ( self ): for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase = TFTransfoXLModel.from_pretrained(A ) self.assertIsNotNone(A ) @unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" ) def _UpperCamelCase ( self ): pass @require_tf class lowerCamelCase__ ( unittest.TestCase ): @unittest.skip("""Skip test until #12651 is resolved.""" ) @slow def _UpperCamelCase ( self ): UpperCAmelCase = 
TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" ) # fmt: off UpperCAmelCase = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] ,dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . 
<eod> </s> <eos> # fmt: off UpperCAmelCase = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> UpperCAmelCase = model.generate(A ,max_length=200 ,do_sample=A ) self.assertListEqual(output_ids[0].numpy().tolist() ,A )
74
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""", """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""", """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""", """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""", # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = '''mobilenet_v2''' def __init__( self ,A=3 ,A=224 ,A=1.0 ,A=8 ,A=8 ,A=6 ,A=32 ,A=True ,A=True ,A="relu6" ,A=True ,A=0.8 ,A=0.02 ,A=0.001 ,A=255 ,**A ,): super().__init__(**A ) if depth_multiplier <= 0: raise ValueError("""depth_multiplier must be greater than zero.""" ) UpperCAmelCase = num_channels UpperCAmelCase = image_size UpperCAmelCase = depth_multiplier UpperCAmelCase = depth_divisible_by UpperCAmelCase = min_depth UpperCAmelCase = expand_ratio UpperCAmelCase = output_stride UpperCAmelCase = first_layer_is_expansion UpperCAmelCase = finegrained_output UpperCAmelCase = hidden_act UpperCAmelCase = tf_padding UpperCAmelCase = classifier_dropout_prob UpperCAmelCase = initializer_range UpperCAmelCase = layer_norm_eps UpperCAmelCase = semantic_loss_ignore_index class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = version.parse('''1.11''' ) @property def _UpperCamelCase ( self ): return OrderedDict([("""pixel_values""", {0: """batch"""})] ) @property def _UpperCamelCase ( self ): if self.task == "image-classification": return OrderedDict([("""logits""", {0: 
"""batch"""})] ) else: return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] ) @property def _UpperCamelCase ( self ): return 1e-4
708
"""simple docstring""" from math import sqrt def _a ( _snake_case = 100_0000 ): """simple docstring""" UpperCAmelCase = 0 UpperCAmelCase = 0 UpperCAmelCase = 42 while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(_snake_case , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(F"""{solution() = }""")
74
0
"""simple docstring""" import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): _UpperCamelCase = yaml.safe_load( """\ name: \"\" allow_empty: false allow_empty_text: true subsections: - name: \"Dataset Card for X\" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: \"Table of Contents\" allow_empty: false allow_empty_text: false subsections: null - name: \"Dataset Description\" allow_empty: false allow_empty_text: false subsections: - name: \"Dataset Summary\" allow_empty: false allow_empty_text: false subsections: null - name: \"Supported Tasks and Leaderboards\" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: false allow_empty_text: true subsections: null """ ) _UpperCamelCase = { """name""": """root""", """text""": """""", """is_empty_text""": True, """subsections""": [ { """name""": """Dataset Card for My Dataset""", """text""": """""", """is_empty_text""": True, """subsections""": [ {"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []}, { """name""": """Dataset Description""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": [ { """name""": """Dataset Summary""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": [], }, { """name""": """Supported Tasks and Leaderboards""", """text""": """""", """is_empty_text""": True, """subsections""": [], }, {"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []}, ], }, ], } ], } _UpperCamelCase = """\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text """ _UpperCamelCase = """\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. #### Extra Ignored Subsection ### Supported Tasks and Leaderboards ### Languages Language Text """ _UpperCamelCase = { """name""": """root""", """text""": """""", """is_empty_text""": True, """subsections""": [ { """name""": """Dataset Card for My Dataset""", """text""": """""", """is_empty_text""": True, """subsections""": [ {"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []}, { """name""": """Dataset Description""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": [ { """name""": """Dataset Summary""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": [ { """name""": """Extra Ignored Subsection""", """text""": """""", """is_empty_text""": True, """subsections""": [], } ], }, { """name""": """Supported Tasks and Leaderboards""", """text""": """""", """is_empty_text""": True, """subsections""": [], }, {"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []}, ], }, ], } ], } _UpperCamelCase = """\ --- --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text """ _UpperCamelCase = ( """The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.""" ) _UpperCamelCase = """\ # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text """ _UpperCamelCase = ( """The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.""" ) _UpperCamelCase = """\ --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text """ _UpperCamelCase = """The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.""" _UpperCamelCase = """\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages Language Text """ _UpperCamelCase = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).""" _UpperCamelCase = """\ --- language: - zh - en --- # Dataset Card for My Dataset """ _UpperCamelCase = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'.""" _UpperCamelCase = """\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Languages Language Text """ _UpperCamelCase = """The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.""" _UpperCamelCase = """\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. 
## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages """ _UpperCamelCase = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.""" _UpperCamelCase = """\ --- language: - zh - en --- ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text """ _UpperCamelCase = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.""" _UpperCamelCase = """\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text # Dataset Card My Dataset """ _UpperCamelCase = """The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.""" _UpperCamelCase = """\ --- language: - zh - en --- # Dataset Card My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text """ _UpperCamelCase = """The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.""" _UpperCamelCase = """""" _UpperCamelCase = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. 
Skipping further validation for this README.\n-\tNo YAML markers are present in the README.""" _UpperCamelCase = """\ --- language: - zh - en --- # Dataset Card for My Dataset # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text """ _UpperCamelCase = """The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.""" @pytest.mark.parametrize( """readme_md, expected_dict""" , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def _a ( _snake_case , _snake_case ): """simple docstring""" assert ReadMe.from_string(_snake_case , _snake_case ).to_dict() == expected_dict @pytest.mark.parametrize( """readme_md, expected_error""" , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def _a ( _snake_case , _snake_case ): """simple docstring""" with pytest.raises(_snake_case , match=re.escape(expected_error.format(path="""root""" ) ) ): UpperCAmelCase = ReadMe.from_string(_snake_case , _snake_case ) readme.validate() @pytest.mark.parametrize( """readme_md, expected_error""" , [ 
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def _a ( _snake_case , _snake_case ): """simple docstring""" with pytest.raises(_snake_case , match=re.escape(expected_error.format(path="""root""" ) ) ): ReadMe.from_string(_snake_case , _snake_case ) @pytest.mark.parametrize( """readme_md,""" , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def _a ( _snake_case ): """simple docstring""" ReadMe.from_string(_snake_case , _snake_case , suppress_parsing_errors=_snake_case ) @pytest.mark.parametrize( """readme_md, expected_dict""" , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def _a ( _snake_case , _snake_case ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase = Path(_snake_case ) / """README.md""" with open(_snake_case , """w+""" ) as readme_file: readme_file.write(_snake_case ) UpperCAmelCase = ReadMe.from_readme(_snake_case , _snake_case ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( """readme_md, expected_error""" , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def _a ( _snake_case , _snake_case ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: 
UpperCAmelCase = Path(_snake_case ) / """README.md""" with open(_snake_case , """w+""" ) as readme_file: readme_file.write(_snake_case ) UpperCAmelCase = expected_error.format(path=_snake_case ) with pytest.raises(_snake_case , match=re.escape(_snake_case ) ): UpperCAmelCase = ReadMe.from_readme(_snake_case , _snake_case ) readme.validate() @pytest.mark.parametrize( """readme_md, expected_error""" , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def _a ( _snake_case , _snake_case ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase = Path(_snake_case ) / """README.md""" with open(_snake_case , """w+""" ) as readme_file: readme_file.write(_snake_case ) UpperCAmelCase = expected_error.format(path=_snake_case ) with pytest.raises(_snake_case , match=re.escape(_snake_case ) ): ReadMe.from_readme(_snake_case , _snake_case ) @pytest.mark.parametrize( """readme_md,""" , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def _a ( _snake_case ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase = Path(_snake_case ) / """README.md""" with open(_snake_case , """w+""" ) as readme_file: readme_file.write(_snake_case ) ReadMe.from_readme(_snake_case , _snake_case , suppress_parsing_errors=_snake_case )
709
"""Convert an original LAION-CLAP checkpoint into the Hugging Face Transformers format.

NOTE(review): the obfuscated original assigned every name to throwaway locals
(``UpperCAmelCase``/``_UpperCamelCase``) while the use sites still referenced the
real identifiers (``KEYS_TO_MODIFY_MAPPING``, ``init_clap``,
``convert_clap_checkpoint`` ...), so the script could not run. The real names are
restored below.
"""
import argparse
import re

import torch
from CLAP import create_model

from transformers import AutoFeatureExtractor, ClapConfig, ClapModel

# Substring renames applied to every parameter name of the original state dict.
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

# Loaded at import time; not referenced below (kept for its caching side effect).
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")


def init_clap(checkpoint_path, enable_fusion=False):
    """Instantiate the original CLAP model (HTSAT-tiny audio tower + RoBERTa text tower).

    Returns the ``(model, model_cfg)`` pair produced by ``CLAP.create_model``.
    """
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    """Map original CLAP parameter names/tensors onto the HF ClapModel layout.

    Besides the plain substring renames from ``KEYS_TO_MODIFY_MAPPING``,
    sequential blocks are renumbered (every third module is the Linear layer)
    and the fused ``qkv`` projections of the audio tower are split into
    separate query/key/value tensors.
    """
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        # BUGFIX: the original tested `if "audio" and "qkv" in key` — the string
        # literal "audio" is always truthy, so the condition degenerated to
        # `"qkv" in key`. Test both substrings explicitly.
        if "audio" in key and "qkv" in key:
            # split fused qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    """Load the original checkpoint, rename its weights and save an HF ClapModel.

    ``config_path`` is accepted for CLI compatibility but the config is built
    from defaults, matching the original script's behaviour.
    """
    clap_model, _ = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()

    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    # NOTE(review): the obfuscated source only showed `... = enable_fusion`;
    # the target below is restored from the upstream conversion script.
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
74
0
"""Tests for the ChineseCLIPProcessor (tokenizer + image processor wrapper).

NOTE(review): this file was mechanically obfuscated — the test class is named
``lowerCamelCase__``, every method ``_UpperCamelCase`` (later defs shadow
earlier ones), and every assignment target became the throwaway local
``UpperCAmelCase`` while later statements still read the original names
(``self.tmpdirname``, ``vocab_tokens``, ``processor`` ...) or a bare ``A``.
As written the tests cannot run; the original identifiers must be restored
before use. The comments below describe the apparent intent of each method.
"""
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor


@require_vision
class lowerCamelCase__(unittest.TestCase):
    # Fixture setup: writes a small BERT vocab and an image-processor config
    # into a fresh temporary directory (presumably the original ``setUp``).
    def _UpperCamelCase(self):
        UpperCAmelCase = tempfile.mkdtemp()
        UpperCAmelCase = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """[PAD]""",
            """[MASK]""",
            """的""",
            """价""",
            """格""",
            """是""",
            """15""",
            """便""",
            """alex""",
            """##andra""",
            """,""",
            """。""",
            """-""",
            """t""",
            """shirt""",
        ]
        UpperCAmelCase = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))

        UpperCAmelCase = {
            """do_resize""": True,
            """size""": {"""height""": 224, """width""": 224},
            """do_center_crop""": True,
            """crop_size""": {"""height""": 18, """width""": 18},
            """do_normalize""": True,
            """image_mean""": [0.48145466, 0.4578275, 0.40821073],
            """image_std""": [0.26862954, 0.26130258, 0.27577711],
            """do_convert_rgb""": True,
        }
        # ``A`` below is unresolved — presumably FEATURE_EXTRACTOR_NAME was
        # joined here and the dict above dumped into it; confirm upstream.
        UpperCAmelCase = os.path.join(self.tmpdirname, A)
        with open(self.image_processor_file, """w""", encoding="""utf-8""") as fp:
            json.dump(A, A)

    # Helper: slow BERT tokenizer built from the fixture vocab.
    def _UpperCamelCase(self, **A):
        return BertTokenizer.from_pretrained(self.tmpdirname, **A)

    # Helper: fast BERT tokenizer built from the fixture vocab.
    def _UpperCamelCase(self, **A):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **A)

    # Helper: image processor built from the fixture config.
    def _UpperCamelCase(self, **A):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **A)

    # Fixture teardown: removes the temporary directory.
    def _UpperCamelCase(self):
        shutil.rmtree(self.tmpdirname)

    # Helper: one random 3x30x400 image as a PIL image. ``np.uinta`` (for
    # np.uint8) and the ``A``/``x`` mismatch are part of the corruption.
    def _UpperCamelCase(self):
        UpperCAmelCase = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
        UpperCAmelCase = [Image.fromarray(np.moveaxis(A, 0, -1)) for x in image_inputs]
        return image_inputs

    # Round-trips a processor through save_pretrained/from_pretrained with both
    # slow and fast tokenizers and compares vocabularies and configs.
    def _UpperCamelCase(self):
        UpperCAmelCase = self.get_tokenizer()
        UpperCAmelCase = self.get_rust_tokenizer()
        UpperCAmelCase = self.get_image_processor()

        UpperCAmelCase = ChineseCLIPProcessor(tokenizer=A, image_processor=A)
        processor_slow.save_pretrained(self.tmpdirname)
        UpperCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=A)
        UpperCAmelCase = ChineseCLIPProcessor(tokenizer=A, image_processor=A)
        processor_fast.save_pretrained(self.tmpdirname)
        UpperCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, A)
        self.assertIsInstance(processor_fast.tokenizer, A)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, A)
        self.assertIsInstance(processor_fast.image_processor, A)

    # Saves a processor, then reloads it with extra tokenizer/image-processor
    # kwargs and checks they are applied.
    def _UpperCamelCase(self):
        UpperCAmelCase = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        UpperCAmelCase = self.get_tokenizer(cls_token="""(CLS)""", sep_token="""(SEP)""")
        UpperCAmelCase = self.get_image_processor(do_normalize=A)

        UpperCAmelCase = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="""(CLS)""", sep_token="""(SEP)""", do_normalize=A
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, A)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, A)

    # Processing images through the processor must match the bare image
    # processor output (compared via per-key sums).
    def _UpperCamelCase(self):
        UpperCAmelCase = self.get_image_processor()
        UpperCAmelCase = self.get_tokenizer()

        UpperCAmelCase = ChineseCLIPProcessor(tokenizer=A, image_processor=A)

        UpperCAmelCase = self.prepare_image_inputs()

        UpperCAmelCase = image_processor(A, return_tensors="""np""")
        UpperCAmelCase = processor(images=A, return_tensors="""np""")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    # Tokenizing text through the processor must match the bare tokenizer.
    def _UpperCamelCase(self):
        UpperCAmelCase = self.get_image_processor()
        UpperCAmelCase = self.get_tokenizer()

        UpperCAmelCase = ChineseCLIPProcessor(tokenizer=A, image_processor=A)

        UpperCAmelCase = """Alexandra,T-shirt的价格是15便士。"""

        UpperCAmelCase = processor(text=A)
        UpperCAmelCase = tokenizer(A)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    # Joint text+image call must expose the expected keys; a call with no
    # input must raise.
    def _UpperCamelCase(self):
        UpperCAmelCase = self.get_image_processor()
        UpperCAmelCase = self.get_tokenizer()

        UpperCAmelCase = ChineseCLIPProcessor(tokenizer=A, image_processor=A)

        UpperCAmelCase = """Alexandra,T-shirt的价格是15便士。"""
        UpperCAmelCase = self.prepare_image_inputs()

        UpperCAmelCase = processor(text=A, images=A)

        self.assertListEqual(list(inputs.keys()), ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""])

        # test if it raises when no input is passed
        with pytest.raises(A):
            processor()

    # Processor.batch_decode must delegate to the tokenizer's batch_decode.
    def _UpperCamelCase(self):
        UpperCAmelCase = self.get_image_processor()
        UpperCAmelCase = self.get_tokenizer()

        UpperCAmelCase = ChineseCLIPProcessor(tokenizer=A, image_processor=A)

        UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        UpperCAmelCase = processor.batch_decode(A)
        UpperCAmelCase = tokenizer.batch_decode(A)

        self.assertListEqual(A, A)

    # The processor's advertised model_input_names must match a joint call.
    def _UpperCamelCase(self):
        UpperCAmelCase = self.get_image_processor()
        UpperCAmelCase = self.get_tokenizer()

        UpperCAmelCase = ChineseCLIPProcessor(tokenizer=A, image_processor=A)

        UpperCAmelCase = """Alexandra,T-shirt的价格是15便士。"""
        UpperCAmelCase = self.prepare_image_inputs()

        UpperCAmelCase = processor(text=A, images=A)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
710
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. _UpperCamelCase = abspath(join(dirname(dirname(__file__)), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def _a ( _snake_case ): """simple docstring""" from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(_snake_case ) def _a ( _snake_case ): """simple docstring""" from diffusers.utils.testing_utils import pytest_terminal_summary_main UpperCAmelCase = terminalreporter.config.getoption("""--make-reports""" ) if make_reports: pytest_terminal_summary_main(_snake_case , id=_snake_case )
74
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {} class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = '''llama''' SCREAMING_SNAKE_CASE = ['''past_key_values'''] def __init__( self ,A=32_000 ,A=4_096 ,A=11_008 ,A=32 ,A=32 ,A=None ,A="silu" ,A=2_048 ,A=0.02 ,A=1e-6 ,A=True ,A=0 ,A=1 ,A=2 ,A=1 ,A=False ,A=None ,**A ,): UpperCAmelCase = vocab_size UpperCAmelCase = max_position_embeddings UpperCAmelCase = hidden_size UpperCAmelCase = intermediate_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads # for backward compatibility if num_key_value_heads is None: UpperCAmelCase = num_attention_heads UpperCAmelCase = num_key_value_heads UpperCAmelCase = hidden_act UpperCAmelCase = initializer_range UpperCAmelCase = rms_norm_eps UpperCAmelCase = pretraining_tp UpperCAmelCase = use_cache UpperCAmelCase = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=A ,bos_token_id=A ,eos_token_id=A ,tie_word_embeddings=A ,**A ,) def _UpperCamelCase ( self ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling ,A ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ F'''got {self.rope_scaling}''' ) UpperCAmelCase = self.rope_scaling.get("""type""" ,A ) UpperCAmelCase = self.rope_scaling.get("""factor""" ,A ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(A ,A ) or rope_scaling_factor <= 1.0: raise ValueError(F'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
711
"""Single-indeterminate polynomial arithmetic.

NOTE(review): the obfuscated original declared parameters as ``A`` while the
bodies read ``polynomial_a``/``coefficients``, referenced the class as
``Polynomial`` although it was named ``lowerCamelCase__``, and gave three
different methods the same name ``_UpperCamelCase``. All real identifiers are
restored below; behaviour matches the visible use-sites.
"""
from collections.abc import MutableSequence


class Polynomial:
    """A polynomial of fixed degree over a single indeterminate x.

    Coefficients are stored lowest power first: coefficients[i] is the
    coefficient of x**i.
    """

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Store degree and a defensive copy of the coefficient sequence.

        Raises:
            ValueError: if len(coefficients) != degree + 1.
        """
        if len(coefficients) != degree + 1:
            raise ValueError("The number of coefficients should be equal to the degree + 1.")
        self.coefficients = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: "Polynomial") -> "Polynomial":
        """Return the sum; the result has the larger of the two degrees."""
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: "Polynomial") -> "Polynomial":
        """Return self - other, implemented as self + (-1) * other."""
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> "Polynomial":
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: "Polynomial") -> "Polynomial":
        """Return the product (degree is the sum of the operand degrees)."""
        coefficients = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += self.coefficients[i] * polynomial_2.coefficients[j]
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: float) -> float:
        """Evaluate the polynomial at x = substitution."""
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution ** i)
        return result

    def __str__(self) -> str:
        """Human-readable form, highest power first; zero terms are skipped
        (an all-zero polynomial renders as the empty string)."""
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> "Polynomial":
        """Return the first derivative (degree drops by one; the derivative of
        a degree-0 polynomial is an empty degree -1 polynomial, matching the
        original behaviour)."""
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: float = 0) -> "Polynomial":
        """Return the antiderivative with the given integration constant."""
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        """Two polynomials are equal iff degree and all coefficients match."""
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
74
0
"""FSDP model/optimizer checkpoint save & load helpers (accelerate).

NOTE(review): this file was mechanically obfuscated — all four functions are
named ``_a`` (each definition shadows the previous one), every parameter is
``_snake_case`` (duplicate argument names, a SyntaxError), and assignment
targets were replaced by ``UpperCAmelCase`` while the bodies still read the
real names (``fsdp_plugin``, ``accelerator``, ``model_index``,
``output_model_file``, ``ckpt_dir`` ...). The original identifiers must be
restored before this can run; comments below record the apparent intent.
"""
import os

import torch

from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version

if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
    from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

_UpperCamelCase = get_logger(__name__)


# Save an FSDP-wrapped model's state dict. Behaviour depends on the plugin's
# state_dict_type: FULL writes one .bin from rank 0, LOCAL writes one file per
# rank, SHARDED writes a distributed-checkpoint directory.
def _a(_snake_case, _snake_case, _snake_case, _snake_case, _snake_case=0):
    """Save `model`'s state dict under `output_dir` according to the FSDP plugin config."""
    os.makedirs(_snake_case, exist_ok=_snake_case)
    with FSDP.state_dict_type(
        _snake_case, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        UpperCAmelCase = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            # Single consolidated file, written only by process 0.
            UpperCAmelCase = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
            UpperCAmelCase = os.path.join(_snake_case, _snake_case)
            if accelerator.process_index == 0:
                logger.info(F'''Saving model to {output_model_file}''')
                torch.save(_snake_case, _snake_case)
                logger.info(F'''Model saved to {output_model_file}''')
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            # One file per rank.
            UpperCAmelCase = (
                F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
                if model_index == 0
                else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
            )
            UpperCAmelCase = os.path.join(_snake_case, _snake_case)
            logger.info(F'''Saving model to {output_model_file}''')
            torch.save(_snake_case, _snake_case)
            logger.info(F'''Model saved to {output_model_file}''')
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            # Distributed checkpoint directory written collectively.
            UpperCAmelCase = os.path.join(_snake_case, F'''{MODEL_NAME}_{model_index}''')
            os.makedirs(_snake_case, exist_ok=_snake_case)
            logger.info(F'''Saving model to {ckpt_dir}''')
            UpperCAmelCase = {"""model""": state_dict}
            dist_cp.save_state_dict(
                state_dict=_snake_case,
                storage_writer=dist_cp.FileSystemWriter(_snake_case),
                planner=DefaultSavePlanner(),
            )
            logger.info(F'''Model saved to {ckpt_dir}''')


# Load a model state dict previously written by the save helper above,
# dispatching on the same three state_dict_type variants.
def _a(_snake_case, _snake_case, _snake_case, _snake_case, _snake_case=0):
    """Load `model`'s state dict from `input_dir` according to the FSDP plugin config."""
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        _snake_case, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(_snake_case) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        """Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
                        """initializing FSDP object"""
                    )
                return
            UpperCAmelCase = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
            UpperCAmelCase = os.path.join(_snake_case, _snake_case)
            logger.info(F'''Loading model from {input_model_file}''')
            UpperCAmelCase = torch.load(_snake_case)
            logger.info(F'''Model loaded from {input_model_file}''')
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            UpperCAmelCase = (
                F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
                if model_index == 0
                else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
            )
            UpperCAmelCase = os.path.join(_snake_case, _snake_case)
            logger.info(F'''Loading model from {input_model_file}''')
            UpperCAmelCase = torch.load(_snake_case)
            logger.info(F'''Model loaded from {input_model_file}''')
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            UpperCAmelCase = (
                os.path.join(_snake_case, F'''{MODEL_NAME}_{model_index}''')
                if F'''{MODEL_NAME}''' not in input_dir
                else input_dir
            )
            logger.info(F'''Loading model from {ckpt_dir}''')
            UpperCAmelCase = {"""model""": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=_snake_case,
                storage_reader=dist_cp.FileSystemReader(_snake_case),
                planner=DefaultLoadPlanner(),
            )
            UpperCAmelCase = state_dict["""model"""]
            logger.info(F'''Model loaded from {ckpt_dir}''')
        model.load_state_dict(_snake_case)


# Save the optimizer state for an FSDP model: consolidated .bin from rank 0
# for FULL_STATE_DICT, otherwise a distributed-checkpoint directory.
def _a(_snake_case, _snake_case, _snake_case, _snake_case, _snake_case, _snake_case=0):
    """Save `optimizer` state for `model` under `output_dir` per the FSDP plugin config."""
    os.makedirs(_snake_case, exist_ok=_snake_case)
    with FSDP.state_dict_type(
        _snake_case, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        UpperCAmelCase = FSDP.optim_state_dict(_snake_case, _snake_case)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                UpperCAmelCase = (
                    F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
                )
                UpperCAmelCase = os.path.join(_snake_case, _snake_case)
                logger.info(F'''Saving Optimizer state to {output_optimizer_file}''')
                torch.save(_snake_case, _snake_case)
                logger.info(F'''Optimizer state saved in {output_optimizer_file}''')
        else:
            UpperCAmelCase = os.path.join(_snake_case, F'''{OPTIMIZER_NAME}_{optimizer_index}''')
            os.makedirs(_snake_case, exist_ok=_snake_case)
            logger.info(F'''Saving Optimizer state to {ckpt_dir}''')
            dist_cp.save_state_dict(
                state_dict={"""optimizer""": optim_state},
                storage_writer=dist_cp.FileSystemWriter(_snake_case),
                planner=DefaultSavePlanner(),
            )
            logger.info(F'''Optimizer state saved in {ckpt_dir}''')


# Load optimizer state written by the save helper above and feed it through
# FSDP.optim_state_dict_to_load before handing it to the optimizer.
def _a(_snake_case, _snake_case, _snake_case, _snake_case, _snake_case, _snake_case=0):
    """Load `optimizer` state for `model` from `input_dir` per the FSDP plugin config."""
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        _snake_case, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            UpperCAmelCase = None
            # below check should work but currently it isn't working (mostly pytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            UpperCAmelCase = (
                F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
            )
            UpperCAmelCase = os.path.join(_snake_case, _snake_case)
            logger.info(F'''Loading Optimizer state from {input_optimizer_file}''')
            UpperCAmelCase = torch.load(_snake_case)
            logger.info(F'''Optimizer state loaded from {input_optimizer_file}''')
        else:
            UpperCAmelCase = (
                os.path.join(_snake_case, F'''{OPTIMIZER_NAME}_{optimizer_index}''')
                if F'''{OPTIMIZER_NAME}''' not in input_dir
                else input_dir
            )
            logger.info(F'''Loading Optimizer from {ckpt_dir}''')
            UpperCAmelCase = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="""optimizer""",
                storage_reader=dist_cp.FileSystemReader(_snake_case),
            )
            UpperCAmelCase = optim_state["""optimizer"""]
            logger.info(F'''Optimizer loaded from {ckpt_dir}''')
        UpperCAmelCase = FSDP.optim_state_dict_to_load(_snake_case, _snake_case, _snake_case)
        optimizer.load_state_dict(_snake_case)
712
"""TensorFlow Flaubert model tests (tester + common test suite + integration).

NOTE(review): this file was mechanically obfuscated — all three classes are
named ``lowerCamelCase__`` (later ones shadow earlier ones), every method is
``_UpperCamelCase``, the mixin bases became the undefined name ``snake_case``,
class attributes collapsed to ``SCREAMING_SNAKE_CASE`` and every assignment
target to ``UpperCAmelCase`` while the bodies still read the original names
(``self.batch_size``, ``TFFlaubertModelTester``, ``result`` ...). Several
signatures also repeat the parameter ``A`` (a SyntaxError). The original
identifiers must be restored before this can run; comments below record the
apparent intent of each part.
"""
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import (
        TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        FlaubertConfig,
        TFFlaubertForMultipleChoice,
        TFFlaubertForQuestionAnsweringSimple,
        TFFlaubertForSequenceClassification,
        TFFlaubertForTokenClassification,
        TFFlaubertModel,
        TFFlaubertWithLMHeadModel,
    )


# Model tester: builds tiny configs/inputs and runs shape checks for each head.
class lowerCamelCase__:
    # Constructor: records the parent test case and the tiny-model
    # hyper-parameters (batch size 13, seq length 7, vocab 99, hidden 32 ...).
    def __init__(self, A,):
        UpperCAmelCase = parent
        UpperCAmelCase = 13
        UpperCAmelCase = 7
        UpperCAmelCase = True
        UpperCAmelCase = True
        UpperCAmelCase = True
        UpperCAmelCase = True
        UpperCAmelCase = True
        UpperCAmelCase = False
        UpperCAmelCase = False
        UpperCAmelCase = False
        UpperCAmelCase = 2
        UpperCAmelCase = 99
        UpperCAmelCase = 0
        UpperCAmelCase = 32
        UpperCAmelCase = 2
        UpperCAmelCase = 4
        UpperCAmelCase = 0.1
        UpperCAmelCase = 0.1
        UpperCAmelCase = 512
        UpperCAmelCase = 16
        UpperCAmelCase = 2
        UpperCAmelCase = 0.02
        UpperCAmelCase = 3
        UpperCAmelCase = 4
        UpperCAmelCase = """last"""
        UpperCAmelCase = True
        UpperCAmelCase = None
        UpperCAmelCase = 0

    # Builds random ids/masks/lengths/labels and a FlaubertConfig for them.
    def _UpperCamelCase(self):
        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.floataa)

        UpperCAmelCase = None
        if self.use_input_lengths:
            UpperCAmelCase = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        UpperCAmelCase = None
        if self.use_token_type_ids:
            UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        UpperCAmelCase = None
        UpperCAmelCase = None
        UpperCAmelCase = None
        if self.use_labels:
            UpperCAmelCase = ids_tensor([self.batch_size], self.type_sequence_label_size)
            UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            UpperCAmelCase = ids_tensor([self.batch_size], 2, dtype=tf.floataa)
            UpperCAmelCase = ids_tensor([self.batch_size], self.num_choices)

        UpperCAmelCase = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    # Base model: checks last_hidden_state shape for dict and list inputs.
    def _UpperCamelCase(self, A, A, A, A, A, A, A, A, A,):
        UpperCAmelCase = TFFlaubertModel(config=A)
        UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
        UpperCAmelCase = model(A)

        UpperCAmelCase = [input_ids, input_mask]
        UpperCAmelCase = model(A)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    # LM head: checks logits shape (batch, seq, vocab).
    def _UpperCamelCase(self, A, A, A, A, A, A, A, A, A,):
        UpperCAmelCase = TFFlaubertWithLMHeadModel(A)
        UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
        UpperCAmelCase = model(A)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    # QA head: checks start/end logits shapes (batch, seq).
    def _UpperCamelCase(self, A, A, A, A, A, A, A, A, A,):
        UpperCAmelCase = TFFlaubertForQuestionAnsweringSimple(A)
        UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths}
        UpperCAmelCase = model(A)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    # Sequence classification head: checks logits shape (batch, num labels).
    def _UpperCamelCase(self, A, A, A, A, A, A, A, A, A,):
        UpperCAmelCase = TFFlaubertForSequenceClassification(A)
        UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths}
        UpperCAmelCase = model(A)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    # Token classification head: checks logits shape (batch, seq, num labels).
    def _UpperCamelCase(self, A, A, A, A, A, A, A, A, A,):
        UpperCAmelCase = self.num_labels
        UpperCAmelCase = TFFlaubertForTokenClassification(config=A)
        UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        UpperCAmelCase = model(A)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    # Multiple-choice head: tiles inputs per choice, checks (batch, num choices).
    def _UpperCamelCase(self, A, A, A, A, A, A, A, A, A,):
        UpperCAmelCase = self.num_choices
        UpperCAmelCase = TFFlaubertForMultipleChoice(config=A)
        UpperCAmelCase = tf.tile(tf.expand_dims(A, 1), (1, self.num_choices, 1))
        UpperCAmelCase = tf.tile(tf.expand_dims(A, 1), (1, self.num_choices, 1))
        UpperCAmelCase = tf.tile(tf.expand_dims(A, 1), (1, self.num_choices, 1))
        UpperCAmelCase = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        UpperCAmelCase = model(A)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    # Adapter for the common test mixin: unpacks the tuple into a config and
    # the inputs dict the shared tests expect.
    def _UpperCamelCase(self):
        UpperCAmelCase = self.prepare_config_and_inputs()
        (
            (UpperCAmelCase),
            (UpperCAmelCase),
            (UpperCAmelCase),
            (UpperCAmelCase),
            (UpperCAmelCase),
            (UpperCAmelCase),
            (UpperCAmelCase),
            (UpperCAmelCase),
            (UpperCAmelCase),
        ) = config_and_inputs

        UpperCAmelCase = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """langs""": token_type_ids,
            """lengths""": input_lengths,
        }
        return config, inputs_dict


# Common test suite; the ``snake_case`` bases are presumably
# TFModelTesterMixin and PipelineTesterMixin (imported above).
@require_tf
class lowerCamelCase__(snake_case, snake_case, unittest.TestCase):
    # all_model_classes
    SCREAMING_SNAKE_CASE = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # all_generative_model_classes
    SCREAMING_SNAKE_CASE = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    # pipeline_model_mapping
    SCREAMING_SNAKE_CASE = (
        {
            '''feature-extraction''': TFFlaubertModel,
            '''fill-mask''': TFFlaubertWithLMHeadModel,
            '''question-answering''': TFFlaubertForQuestionAnsweringSimple,
            '''text-classification''': TFFlaubertForSequenceClassification,
            '''token-classification''': TFFlaubertForTokenClassification,
            '''zero-shot''': TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    SCREAMING_SNAKE_CASE = False
    SCREAMING_SNAKE_CASE = False

    # is_pipeline_test_to_skip: skip QA pipeline tests with slow tokenizers.
    def _UpperCamelCase(self, A, A, A, A, A):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("""Fast""")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    # setUp: wires up the model tester and the config tester.
    def _UpperCamelCase(self):
        UpperCAmelCase = TFFlaubertModelTester(self)
        UpperCAmelCase = ConfigTester(self, config_class=A, emb_dim=37)

    def _UpperCamelCase(self):
        self.config_tester.run_common_tests()

    def _UpperCamelCase(self):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*A)

    def _UpperCamelCase(self):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*A)

    def _UpperCamelCase(self):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*A)

    def _UpperCamelCase(self):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*A)

    def _UpperCamelCase(self):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*A)

    def _UpperCamelCase(self):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*A)

    @slow
    def _UpperCamelCase(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase = TFFlaubertModel.from_pretrained(A)
            self.assertIsNotNone(A)


# Slow integration test against the pretrained small Flaubert checkpoint:
# checks the output shape and a 3x3 logits slice against recorded values.
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__(unittest.TestCase):
    @slow
    def _UpperCamelCase(self):
        UpperCAmelCase = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""")
        UpperCAmelCase = tf.convert_to_tensor(
            [[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]],
            dtype=tf.intaa,
        )  # "J'aime flaubert !"
        UpperCAmelCase = model(A)[0]
        UpperCAmelCase = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, A)
        # compare the actual values for a slice.
        UpperCAmelCase = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.floataa,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
74
0
"""simple docstring""" from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def _a ( _snake_case , _snake_case , _snake_case = None ) -> str: """simple docstring""" if version.parse(hfh.__version__ ).release < version.parse("""0.11.0""" ).release: # old versions of hfh don't url-encode the file path UpperCAmelCase = quote(_snake_case ) return hfh.hf_hub_url(_snake_case , _snake_case , repo_type="""dataset""" , revision=_snake_case )
713
"""Convert original GLPN depth-estimation checkpoints to the HF Transformers format."""
import argparse
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict):
    """Map the original GLPN parameter names onto the HF implementation's names."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict


def read_in_k_v(state_dict, config):
    """Split the fused key/value projection of each block into separate key and value weights."""
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]


def prepare_img():
    """Download the standard COCO cats image used to sanity-check the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Copy/paste/tweak the original checkpoint's weights into our GLPN structure."""
    # GLPN uses a SegFormer-B4 sized encoder
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
74
0
"""Question-answering ``Trainer`` subclass with pytorch-quantization calibration and ONNX export hooks."""
import logging
import os

import quant_trainer
import torch
from torch.utils.data import DataLoader

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput


logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        """Build a non-shuffled dataloader over the calibration dataset."""
        # NOTE(review): self.calib_dataset is read but never set in __init__ —
        # presumably assigned by the caller before calibration; confirm upstream.
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")
        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=False,
        )

    def calibrate(self, calib_dataset=None):
        """Run up to ``self.calib_num`` samples through the model to collect quantization ranges."""
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """Evaluate and post-process predictions into SQuAD-style metrics."""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        """Run prediction and post-process into a ``PredictionOutput``."""
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        """Export the (fake-)quantized model to ONNX using one evaluation batch as sample input."""
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
714
"""simple docstring""" def _a ( _snake_case ): # noqa: E741 """simple docstring""" UpperCAmelCase = len(_snake_case ) UpperCAmelCase = 0 UpperCAmelCase = [0] * n UpperCAmelCase = [False] * n UpperCAmelCase = [False] * n def dfs(_snake_case , _snake_case , _snake_case , _snake_case ): if parent == root: out_edge_count += 1 UpperCAmelCase = True UpperCAmelCase = at for to in l[at]: if to == parent: pass elif not visited[to]: UpperCAmelCase = dfs(_snake_case , _snake_case , _snake_case , _snake_case ) UpperCAmelCase = min(low[at] , low[to] ) # AP found via bridge if at < low[to]: UpperCAmelCase = True # AP found via cycle if at == low[to]: UpperCAmelCase = True else: UpperCAmelCase = min(low[at] , _snake_case ) return out_edge_count for i in range(_snake_case ): if not visited[i]: UpperCAmelCase = 0 UpperCAmelCase = dfs(_snake_case , _snake_case , -1 , _snake_case ) UpperCAmelCase = out_edge_count > 1 for x in range(len(_snake_case ) ): if is_art[x] is True: print(_snake_case ) # Adjacency list of graph _UpperCamelCase = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], } compute_ap(data)
74
0
"""Fast tests for the IF inpainting super-resolution pipeline."""
import random
import unittest

import torch

from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        # super-resolution stage shares the IF dummy components
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        return {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
715
"""simple docstring""" _UpperCamelCase = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ _UpperCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}] _UpperCamelCase = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
74
0
"""Fast and slow tests for the Stable Diffusion XL image-to-image pipeline."""
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny components so the full pipeline runs in milliseconds."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        # NOTE(review): local_files_only mirrors the original call — confirm a cached tokenizer is available.
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4


@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        return {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
716
"""Pure-Python implementation of the SHA-256 hash function (FIPS 180-4)."""
import argparse
import struct
import unittest


class SHA256:
    """Compute the SHA-256 digest of ``data`` (a bytestring); result in ``self.hash``."""

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values (first 32 bits of the fractional parts of the
        # square roots of the first 8 primes)
        self.hashes = [
            0x6A09E667,
            0xBB67AE85,
            0x3C6EF372,
            0xA54FF53A,
            0x510E527F,
            0x9B05688C,
            0x1F83D9AB,
            0x5BE0CD19,
        ]

        # Initialize round constants (first 32 bits of the fractional parts of
        # the cube roots of the first 64 primes)
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]

        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad the message to a multiple of 64 bytes, appending a 1-bit and the 64-bit length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        """Run the SHA-256 compression function over every 64-byte block."""
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64] for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array (message schedule)
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (words[index - 16] + s0 + words[index - 7] + s1) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (h + s1 + ch + self.round_constants[index] + words[index]) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit ``value`` by ``rotations`` bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self):
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    """CLI entry point: hash a string (default) or a file's contents."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
74
0
"""Tests for the GPT-NeoX-Japanese model family."""
import unittest

from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel


class GPTNeoXJapaneseModelTester:
    """Builds tiny configs/inputs and runs shape checks for the model classes."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
717
"""simple docstring""" def _a ( _snake_case = 10 , _snake_case = 22 ): """simple docstring""" UpperCAmelCase = range(1 , _snake_case ) UpperCAmelCase = range(1 , _snake_case ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(F"""{solution(10, 22) = }""")
74
0
"""simple docstring""" from math import isclose, sqrt def _a ( _snake_case , _snake_case , _snake_case ): """simple docstring""" UpperCAmelCase = point_y / 4 / point_x UpperCAmelCase = 2 * normal_gradient / (1 + normal_gradient * normal_gradient) UpperCAmelCase = (1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) UpperCAmelCase = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaeneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 UpperCAmelCase = outgoing_gradient**2 + 4 UpperCAmelCase = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) UpperCAmelCase = (point_y - outgoing_gradient * point_x) ** 2 - 100 UpperCAmelCase = ( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) UpperCAmelCase = ( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point UpperCAmelCase = x_minus if isclose(_snake_case , _snake_case ) else x_plus UpperCAmelCase = point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def _a ( _snake_case = 1.4 , _snake_case = -9.6 ): """simple docstring""" UpperCAmelCase = 0 UpperCAmelCase = first_x_coord UpperCAmelCase = first_y_coord UpperCAmelCase = (10.1 - point_y) / (0.0 - point_x) while not (-0.01 <= point_x <= 0.01 and point_y > 0): UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = next_point(_snake_case , _snake_case , _snake_case ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(F"""{solution() = }""")
718
"""simple docstring""" from __future__ import annotations def _a ( _snake_case ): """simple docstring""" return len(set(_snake_case ) ) == len(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
74
0
"""simple docstring""" from math import sqrt def _a ( _snake_case = 100_0000 ): """simple docstring""" UpperCAmelCase = 0 UpperCAmelCase = 0 UpperCAmelCase = 42 while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(_snake_case , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(F"""{solution() = }""")
719
"""simple docstring""" import math def _a ( _snake_case ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_snake_case ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _a ( _snake_case = 0.1 ): """simple docstring""" UpperCAmelCase = 3 UpperCAmelCase = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ): primes += is_prime(_snake_case ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
74
0
"""simple docstring""" import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = '''naver-clova-ix/donut-base-finetuned-docvqa''' SCREAMING_SNAKE_CASE = ( '''This is a tool that answers a question about an document (pdf). It takes an input named `document` which ''' '''should be the document containing the information, as well as a `question` that is the question about the ''' '''document. It returns a text that contains the answer to the question.''' ) SCREAMING_SNAKE_CASE = '''document_qa''' SCREAMING_SNAKE_CASE = AutoProcessor SCREAMING_SNAKE_CASE = VisionEncoderDecoderModel SCREAMING_SNAKE_CASE = ['''image''', '''text'''] SCREAMING_SNAKE_CASE = ['''text'''] def __init__( self ,*A ,**A ): if not is_vision_available(): raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" ) super().__init__(*A ,**A ) def _UpperCamelCase ( self ,A ,A ): UpperCAmelCase = """<s_docvqa><s_question>{user_input}</s_question><s_answer>""" UpperCAmelCase = task_prompt.replace("""{user_input}""" ,A ) UpperCAmelCase = self.pre_processor.tokenizer( A ,add_special_tokens=A ,return_tensors="""pt""" ).input_ids UpperCAmelCase = self.pre_processor(A ,return_tensors="""pt""" ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def _UpperCamelCase ( self ,A ): return self.model.generate( inputs["""pixel_values"""].to(self.device ) ,decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) ,max_length=self.model.decoder.config.max_position_embeddings ,early_stopping=A ,pad_token_id=self.pre_processor.tokenizer.pad_token_id ,eos_token_id=self.pre_processor.tokenizer.eos_token_id ,use_cache=A ,num_beams=1 ,bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] 
,return_dict_in_generate=A ,).sequences def _UpperCamelCase ( self ,A ): UpperCAmelCase = self.pre_processor.batch_decode(A )[0] UpperCAmelCase = sequence.replace(self.pre_processor.tokenizer.eos_token ,"""""" ) UpperCAmelCase = sequence.replace(self.pre_processor.tokenizer.pad_token ,"""""" ) UpperCAmelCase = re.sub(r"""<.*?>""" ,"""""" ,A ,count=1 ).strip() # remove first task start token UpperCAmelCase = self.pre_processor.tokenajson(A ) return sequence["answer"]
720
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = ['''image_processor''', '''tokenizer'''] SCREAMING_SNAKE_CASE = '''CLIPImageProcessor''' SCREAMING_SNAKE_CASE = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self ,A=None ,A=None ,**A ): UpperCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" ,A ,) UpperCAmelCase = kwargs.pop("""feature_extractor""" ) UpperCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(A ,A ) def __call__( self ,A=None ,A=None ,A=None ,**A ): if text is None and images is None: raise ValueError("""You have to specify either text or images. 
Both cannot be none.""" ) if text is not None: UpperCAmelCase = self.tokenizer(A ,return_tensors=A ,**A ) if images is not None: UpperCAmelCase = self.image_processor(A ,return_tensors=A ,**A ) if text is not None and images is not None: UpperCAmelCase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**A ) ,tensor_type=A ) def _UpperCamelCase ( self ,*A ,**A ): return self.tokenizer.batch_decode(*A ,**A ) def _UpperCamelCase ( self ,*A ,**A ): return self.tokenizer.decode(*A ,**A ) @property def _UpperCamelCase ( self ): UpperCAmelCase = self.tokenizer.model_input_names UpperCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _UpperCamelCase ( self ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,A ,) return self.image_processor_class @property def _UpperCamelCase ( self ): warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,A ,) return self.image_processor
74
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class lowerCamelCase__ : def __init__( self ,A ,): UpperCAmelCase = parent UpperCAmelCase = 13 UpperCAmelCase = 7 UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = 2 UpperCAmelCase = 99 UpperCAmelCase = 0 UpperCAmelCase = 32 UpperCAmelCase = 2 UpperCAmelCase = 4 UpperCAmelCase = 0.1 UpperCAmelCase = 0.1 UpperCAmelCase = 512 UpperCAmelCase = 16 UpperCAmelCase = 2 UpperCAmelCase = 0.02 UpperCAmelCase = 3 UpperCAmelCase = 4 UpperCAmelCase = """last""" UpperCAmelCase = True UpperCAmelCase = None UpperCAmelCase = 0 def _UpperCamelCase ( self ): UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ,dtype=tf.floataa ) UpperCAmelCase = None if self.use_input_lengths: UpperCAmelCase = ( ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length UpperCAmelCase = None if self.use_token_type_ids: UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs ) UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = None if 
self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) UpperCAmelCase = ids_tensor([self.batch_size] ,2 ,dtype=tf.floataa ) UpperCAmelCase = ids_tensor([self.batch_size] ,self.num_choices ) UpperCAmelCase = FlaubertConfig( vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,bos_token_id=self.bos_token_id ,) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = TFFlaubertModel(config=A ) UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids} UpperCAmelCase = model(A ) UpperCAmelCase = [input_ids, input_mask] UpperCAmelCase = model(A ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = TFFlaubertWithLMHeadModel(A ) UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids} UpperCAmelCase = model(A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = TFFlaubertForQuestionAnsweringSimple(A ) UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths} 
UpperCAmelCase = model(A ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = TFFlaubertForSequenceClassification(A ) UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths} UpperCAmelCase = model(A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = self.num_labels UpperCAmelCase = TFFlaubertForTokenClassification(config=A ) UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} UpperCAmelCase = model(A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = self.num_choices UpperCAmelCase = TFFlaubertForMultipleChoice(config=A ) UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) ) UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) ) UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) ) UpperCAmelCase = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } UpperCAmelCase = model(A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _UpperCamelCase ( self ): UpperCAmelCase = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) = config_and_inputs UpperCAmelCase = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """langs""": token_type_ids, """lengths""": 
input_lengths, } return config, inputs_dict @require_tf class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable SCREAMING_SNAKE_CASE = ( { '''feature-extraction''': TFFlaubertModel, '''fill-mask''': TFFlaubertWithLMHeadModel, '''question-answering''': TFFlaubertForQuestionAnsweringSimple, '''text-classification''': TFFlaubertForSequenceClassification, '''token-classification''': TFFlaubertForTokenClassification, '''zero-shot''': TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def _UpperCamelCase ( self ,A ,A ,A ,A ,A ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _UpperCamelCase ( self ): UpperCAmelCase = TFFlaubertModelTester(self ) UpperCAmelCase = ConfigTester(self ,config_class=A ,emb_dim=37 ) def _UpperCamelCase ( self ): self.config_tester.run_common_tests() def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*A ) @slow def _UpperCamelCase ( self ): for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase = TFFlaubertModel.from_pretrained(A ) self.assertIsNotNone(A ) @require_tf @require_sentencepiece @require_tokenizers class lowerCamelCase__ ( unittest.TestCase ): @slow def _UpperCamelCase ( self ): UpperCAmelCase = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" ) UpperCAmelCase = tf.convert_to_tensor( [[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] ,dtype=tf.intaa ,) # "J'aime flaubert !" 
UpperCAmelCase = model(A )[0] UpperCAmelCase = tf.TensorShape((1, 8, 512) ) self.assertEqual(output.shape ,A ) # compare the actual values for a slice. UpperCAmelCase = tf.convert_to_tensor( [ [ [-1.8768773, -1.566555, 0.27072418], [-1.6920038, -0.5873505, 1.9329599], [-2.9563985, -1.6993835, 1.7972052], ] ] ,dtype=tf.floataa ,) self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
721
"""simple docstring""" from __future__ import annotations from collections.abc import Generator import requests from bsa import BeautifulSoup _UpperCamelCase = """https://www.indeed.co.in/jobs?q=mobile+app+development&l=""" def _a ( _snake_case = "mumbai" ): """simple docstring""" UpperCAmelCase = BeautifulSoup(requests.get(url + location ).content , """html.parser""" ) # This attribute finds out all the specifics listed in a job for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ): UpperCAmelCase = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip() UpperCAmelCase = job.find("""span""" , {"""class""": """company"""} ).text.strip() yield job_title, company_name if __name__ == "__main__": for i, job in enumerate(fetch_jobs("""Bangalore"""), 1): print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
74
0
from __future__ import annotations def _a ( _snake_case ): """simple docstring""" UpperCAmelCase = [True] * limit UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): UpperCAmelCase = i * 2 while index < limit: UpperCAmelCase = False UpperCAmelCase = index + i UpperCAmelCase = [2] for i in range(3 , _snake_case , 2 ): if is_prime[i]: primes.append(_snake_case ) return primes def _a ( _snake_case = 100_0000 ): """simple docstring""" UpperCAmelCase = prime_sieve(_snake_case ) UpperCAmelCase = 0 UpperCAmelCase = 0 for i in range(len(_snake_case ) ): for j in range(i + length , len(_snake_case ) ): UpperCAmelCase = sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: UpperCAmelCase = j - i UpperCAmelCase = sol return largest if __name__ == "__main__": print(F"""{solution() = }""")
700
"""simple docstring""" import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class lowerCamelCase__ ( unittest.TestCase ): def _UpperCamelCase ( self ): UpperCAmelCase = ["""a""", """b""", """c"""] # Defaults to last layer if both are None UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(A ,A ,A ) self.assertEqual(A ,["""c"""] ) self.assertEqual(A ,[2] ) # Out indices set to match out features UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(["""a""", """c"""] ,A ,A ) self.assertEqual(A ,["""a""", """c"""] ) self.assertEqual(A ,[0, 2] ) # Out features set to match out indices UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(A ,[0, 2] ,A ) self.assertEqual(A ,["""a""", """c"""] ) self.assertEqual(A ,[0, 2] ) # Out features selected from negative indices UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(A ,[-3, -1] ,A ) self.assertEqual(A ,["""a""", """c"""] ) self.assertEqual(A ,[-3, -1] ) def _UpperCamelCase ( self ): # Stage names must be set with self.assertRaises(A ): verify_out_features_out_indices(["""a""", """b"""] ,(0, 1) ,A ) # Out features must be a list with self.assertRaises(A ): verify_out_features_out_indices(("""a""", """b""") ,(0, 1) ,["""a""", """b"""] ) # Out features must be a subset of stage names with self.assertRaises(A ): verify_out_features_out_indices(["""a""", """b"""] ,(0, 1) ,["""a"""] ) # Out indices must be a list or tuple with self.assertRaises(A ): verify_out_features_out_indices(A ,0 ,["""a""", """b"""] ) # Out indices must be a subset of stage names with self.assertRaises(A ): verify_out_features_out_indices(A ,(0, 1) ,["""a"""] ) # Out features and out indices must be the same length with self.assertRaises(A ): verify_out_features_out_indices(["""a""", """b"""] ,(0,) ,["""a""", """b""", """c"""] ) # Out features should 
match out indices with self.assertRaises(A ): verify_out_features_out_indices(["""a""", """b"""] ,(0, 2) ,["""a""", """b""", """c"""] ) # Out features and out indices should be in order with self.assertRaises(A ): verify_out_features_out_indices(["""b""", """a"""] ,(0, 1) ,["""a""", """b"""] ) # Check passes with valid inputs verify_out_features_out_indices(["""a""", """b""", """d"""] ,(0, 1, -1) ,["""a""", """b""", """c""", """d"""] ) def _UpperCamelCase ( self ): UpperCAmelCase = BackboneMixin() UpperCAmelCase = ["""a""", """b""", """c"""] UpperCAmelCase = ["""a""", """c"""] UpperCAmelCase = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features ,["""a""", """c"""] ) self.assertEqual(backbone.out_indices ,[0, 2] ) # Check out features and indices are updated correctly UpperCAmelCase = ["""a""", """b"""] self.assertEqual(backbone.out_features ,["""a""", """b"""] ) self.assertEqual(backbone.out_indices ,[0, 1] ) UpperCAmelCase = [-3, -1] self.assertEqual(backbone.out_features ,["""a""", """c"""] ) self.assertEqual(backbone.out_indices ,[-3, -1] )
74
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _UpperCamelCase = logging.get_logger(__name__) class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = ['''pixel_values'''] def __init__( self ,A = True ,A = None ,A = None ,A = PILImageResampling.BILINEAR ,A = True ,A = 1 / 255 ,A = True ,A = None ,A = None ,**A ,): super().__init__(**A ) UpperCAmelCase = size if size is not None else {"""shortest_edge""": 384} UpperCAmelCase = get_size_dict(A ,default_to_square=A ) UpperCAmelCase = do_resize UpperCAmelCase = size # Default value set here for backwards compatibility where the value in config is None UpperCAmelCase = crop_pct if crop_pct is not None else 224 / 256 UpperCAmelCase = resample UpperCAmelCase = do_rescale UpperCAmelCase = rescale_factor UpperCAmelCase = do_normalize UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def _UpperCamelCase ( self ,A ,A ,A ,A = PILImageResampling.BICUBIC ,A = None ,**A ,): UpperCAmelCase = get_size_dict(A ,default_to_square=A ) if "shortest_edge" not in size: raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}''' ) UpperCAmelCase = size["""shortest_edge"""] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct UpperCAmelCase = int(shortest_edge / crop_pct ) UpperCAmelCase = get_resize_output_image_size(A ,size=A ,default_to_square=A ) UpperCAmelCase = resize(image=A ,size=A ,resample=A ,data_format=A ,**A ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=A ,size=(shortest_edge, shortest_edge) ,data_format=A ,**A ) else: # warping (no cropping) when evaluated at 384 or larger return resize( A ,size=(shortest_edge, shortest_edge) ,resample=A ,data_format=A ,**A ) def _UpperCamelCase ( self ,A ,A ,A = None ,**A ,): return rescale(A ,scale=A ,data_format=A ,**A ) def _UpperCamelCase ( self ,A ,A ,A ,A = None ,**A ,): return normalize(A ,mean=A ,std=A ,data_format=A ,**A ) def _UpperCamelCase ( self ,A ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = ChannelDimension.FIRST ,**A ,): UpperCAmelCase = do_resize if do_resize is not None else self.do_resize UpperCAmelCase = crop_pct if crop_pct is not None else self.crop_pct UpperCAmelCase = resample if resample is not None else self.resample UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase = image_mean if image_mean is not None else self.image_mean UpperCAmelCase = image_std if image_std is not None else self.image_std UpperCAmelCase = size if size is not None else self.size UpperCAmelCase = get_size_dict(A ,default_to_square=A ) UpperCAmelCase = make_list_of_images(A ) if not valid_images(A ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError("""crop_pct must be specified if size < 384.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. UpperCAmelCase = [to_numpy_array(A ) for image in images] if do_resize: UpperCAmelCase = [self.resize(image=A ,size=A ,crop_pct=A ,resample=A ) for image in images] if do_rescale: UpperCAmelCase = [self.rescale(image=A ,scale=A ) for image in images] if do_normalize: UpperCAmelCase = [self.normalize(image=A ,mean=A ,std=A ) for image in images] UpperCAmelCase = [to_channel_dimension_format(A ,A ) for image in images] UpperCAmelCase = {"""pixel_values""": images} return BatchFeature(data=A ,tensor_type=A )
701
"""simple docstring""" from __future__ import annotations from typing import Any class lowerCamelCase__ : def __init__( self ,A = 6 ): UpperCAmelCase = None UpperCAmelCase = None self.create_linked_list(A ) def _UpperCamelCase ( self ,A ): UpperCAmelCase = Node() UpperCAmelCase = current_node UpperCAmelCase = current_node UpperCAmelCase = current_node for _ in range(1 ,A ): UpperCAmelCase = Node() UpperCAmelCase = current_node UpperCAmelCase = previous_node UpperCAmelCase = current_node UpperCAmelCase = self.front UpperCAmelCase = previous_node def _UpperCamelCase ( self ): return ( self.front == self.rear and self.front is not None and self.front.data is None ) def _UpperCamelCase ( self ): self.check_can_perform_operation() return self.front.data if self.front else None def _UpperCamelCase ( self ,A ): if self.rear is None: return self.check_is_full() if not self.is_empty(): UpperCAmelCase = self.rear.next if self.rear: UpperCAmelCase = data def _UpperCamelCase ( self ): self.check_can_perform_operation() if self.rear is None or self.front is None: return None if self.front == self.rear: UpperCAmelCase = self.front.data UpperCAmelCase = None return data UpperCAmelCase = self.front UpperCAmelCase = old_front.next UpperCAmelCase = old_front.data UpperCAmelCase = None return data def _UpperCamelCase ( self ): if self.is_empty(): raise Exception("""Empty Queue""" ) def _UpperCamelCase ( self ): if self.rear and self.rear.next == self.front: raise Exception("""Full Queue""" ) class lowerCamelCase__ : def __init__( self ): UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = None if __name__ == "__main__": import doctest doctest.testmod()
74
0
# NOTE(review): this module appears to be machine-mangled — original identifiers
# were replaced (`lowerCamelCase__`, `UpperCAmelCase`, duplicated `A` parameters),
# so several defs are not valid Python as written (duplicate argument names) and
# locals such as `vqa_pipeline`/`image`/`question` are referenced but never bound.
# Comments below describe the apparent intent; confirm against the original
# transformers test file before relying on any of it.
"""Pipeline tests for the visual-question-answering task."""
import unittest

from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:
    # Fallback stub used when the vision extras (PIL) are not installed.
    class lowerCamelCase__ :
        @staticmethod
        def _UpperCamelCase ( *A ,**A ):
            # no-op placeholder (presumably stands in for Image.open)
            pass


@is_pipeline_test
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
    # Model mapping consumed by the shared pipeline-test machinery.
    SCREAMING_SNAKE_CASE = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def _UpperCamelCase ( self ,A ,A ,A ):
        # Build a tiny random VQA pipeline plus two example payloads:
        # one with a PIL image object, one with an image path.
        # NOTE(review): duplicated `A` parameters are a mangling artifact.
        UpperCAmelCase = pipeline("""visual-question-answering""" ,model="""hf-internal-testing/tiny-vilt-random-vqa""" )
        UpperCAmelCase = [
            {
                """image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
                """question""": """How many cats are there?""",
            },
            {
                """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
                """question""": """How many cats are there?""",
            },
        ]
        return vqa_pipeline, examples

    def _UpperCamelCase ( self ,A ,A ):
        # Run the pipeline on the prepared examples and assert only the result
        # *structure* (list of [{"score", "answer"}]) — scores are random here.
        UpperCAmelCase = vqa_pipeline(A ,top_k=1 )
        self.assertEqual(
            A ,[
                [{"""score""": ANY(A ), """answer""": ANY(A )}],
                [{"""score""": ANY(A ), """answer""": ANY(A )}],
            ] ,)

    @require_torch
    def _UpperCamelCase ( self ):
        # Smoke test with a tiny random checkpoint: exercises both the
        # keyword-argument call form and the single-dict call form.
        UpperCAmelCase = pipeline("""visual-question-answering""" ,model="""hf-internal-testing/tiny-vilt-random-vqa""" )
        UpperCAmelCase = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
        UpperCAmelCase = """How many cats are there?"""

        UpperCAmelCase = vqa_pipeline(image=A ,question="""How many cats are there?""" ,top_k=2 )
        self.assertEqual(
            A ,[{"""score""": ANY(A ), """answer""": ANY(A )}, {"""score""": ANY(A ), """answer""": ANY(A )}] )

        UpperCAmelCase = vqa_pipeline({"""image""": image, """question""": question} ,top_k=2 )
        self.assertEqual(
            A ,[{"""score""": ANY(A ), """answer""": ANY(A )}, {"""score""": ANY(A ), """answer""": ANY(A )}] )

    @slow
    @require_torch
    def _UpperCamelCase ( self ):
        # Integration test against the real finetuned checkpoint; asserts exact
        # (rounded) scores via nested_simplify for all three call forms,
        # including batched input.
        UpperCAmelCase = pipeline("""visual-question-answering""" ,model="""dandelin/vilt-b32-finetuned-vqa""" )
        UpperCAmelCase = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
        UpperCAmelCase = """How many cats are there?"""

        UpperCAmelCase = vqa_pipeline(image=A ,question=A ,top_k=2 )
        self.assertEqual(
            nested_simplify(A ,decimals=4 ) ,[{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] )

        UpperCAmelCase = vqa_pipeline({"""image""": image, """question""": question} ,top_k=2 )
        self.assertEqual(
            nested_simplify(A ,decimals=4 ) ,[{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] )

        UpperCAmelCase = vqa_pipeline(
            [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] ,top_k=2 )
        self.assertEqual(
            nested_simplify(A ,decimals=4 ) ,[[{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}]] * 2 ,)

    @require_tf
    @unittest.skip("""Visual question answering not implemented in TF""" )
    def _UpperCamelCase ( self ):
        # TF backend intentionally unsupported for this pipeline.
        pass
702
# NOTE(review): machine-mangled module. Assignment targets such as
# `hf_model.<layer>.<param> = ...` were collapsed to bare `UpperCAmelCase = ...`,
# so the weight copies below no longer write into the model, and duplicated
# `_snake_case` parameters make the defs invalid Python. Verify against the
# original SpeechT5 HiFi-GAN conversion script before running.
"""Convert an original SpeechT5 HiFi-GAN vocoder checkpoint to the HF format."""
import argparse

import numpy as np
import torch

from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging


logging.set_verbosity_info()
# Module-level logger for the conversion script.
_UpperCamelCase = logging.get_logger("""transformers.models.speecht5""")


def _a ( _snake_case , _snake_case , _snake_case ):
    """Copy generator weights from the original checkpoint dict into the HF model.

    NOTE(review): the three duplicated ``_snake_case`` parameters were presumably
    ``(checkpoint, hf_model, config)`` — confirm against the original script.
    Weight norm is applied so the g/v parametrization exists, weights are copied,
    then weight norm is removed to fold the parameters back.
    """
    hf_model.apply_weight_norm()

    # input convolution (weight-normalized: g/v pair plus bias)
    UpperCAmelCase = checkpoint["""input_conv.weight_g"""]
    UpperCAmelCase = checkpoint["""input_conv.weight_v"""]
    UpperCAmelCase = checkpoint["""input_conv.bias"""]

    # one transposed-conv upsampling layer per upsample rate
    for i in range(len(config.upsample_rates ) ):
        UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.weight_g''']
        UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.weight_v''']
        UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.bias''']

    # residual blocks: upsample_rates x resblock_kernel_sizes blocks,
    # each with convs1/convs2 per dilation
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
            UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
            UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']

            UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
            UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
            UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']

    # output convolution
    UpperCAmelCase = checkpoint["""output_conv.1.weight_g"""]
    UpperCAmelCase = checkpoint["""output_conv.1.weight_v"""]
    UpperCAmelCase = checkpoint["""output_conv.1.bias"""]

    hf_model.remove_weight_norm()


@torch.no_grad()
def _a ( _snake_case , _snake_case , _snake_case , _snake_case=None , _snake_case=None , ):
    """Drive the conversion: build config, load weights and stats, save/push.

    NOTE(review): parameters were presumably
    (checkpoint_path, stats_path, pytorch_dump_folder_path, config_path, repo_id)
    — see the argparse call in the ``__main__`` block below.
    """
    if config_path is not None:
        UpperCAmelCase = SpeechTaHifiGanConfig.from_pretrained(_snake_case )
    else:
        UpperCAmelCase = SpeechTaHifiGanConfig()

    UpperCAmelCase = SpeechTaHifiGan(_snake_case )

    # NOTE(review): torch.load without map_location="cpu" will fail for
    # GPU-saved checkpoints on CPU-only hosts — consider confirming/adding it.
    UpperCAmelCase = torch.load(_snake_case )
    load_weights(orig_checkpoint["""model"""]["""generator"""] , _snake_case , _snake_case )

    # stats.npy: presumably row 0 = feature means, row 1 = feature scales —
    # TODO confirm against the original checkpoint layout.
    UpperCAmelCase = np.load(_snake_case )
    UpperCAmelCase = stats[0].reshape(-1 )
    UpperCAmelCase = stats[1].reshape(-1 )
    UpperCAmelCase = torch.from_numpy(_snake_case ).float()
    UpperCAmelCase = torch.from_numpy(_snake_case ).float()

    model.save_pretrained(_snake_case )

    if repo_id:
        print("""Pushing to the hub...""" )
        model.push_to_hub(_snake_case )


if __name__ == "__main__":
    # CLI entry point; see flag help strings for each argument's meaning.
    _UpperCamelCase = argparse.ArgumentParser()
    parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
    parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
    )

    _UpperCamelCase = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
74
0
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def _a ( _snake_case , _snake_case=0.999 , _snake_case="cosine" , ): """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(_snake_case ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_snake_case ): return math.exp(t * -12.0 ) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) UpperCAmelCase = [] for i in range(_snake_case ): UpperCAmelCase = i / num_diffusion_timesteps UpperCAmelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_snake_case ) / alpha_bar_fn(_snake_case ) , _snake_case ) ) return torch.tensor(_snake_case , dtype=torch.floataa ) class lowerCamelCase__ ( snake_case , snake_case ): SCREAMING_SNAKE_CASE = [e.name for e in KarrasDiffusionSchedulers] SCREAMING_SNAKE_CASE = 2 @register_to_config def __init__( self ,A = 1_000 ,A = 0.00085 ,A = 0.012 ,A = "linear" ,A = None ,A = "epsilon" ,A = "linspace" ,A = 0 ,): if trained_betas is not None: UpperCAmelCase = torch.tensor(A ,dtype=torch.floataa ) elif beta_schedule == "linear": UpperCAmelCase = torch.linspace(A ,A ,A ,dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
UpperCAmelCase = ( torch.linspace(beta_start**0.5 ,beta_end**0.5 ,A ,dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule UpperCAmelCase = betas_for_alpha_bar(A ) else: raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' ) UpperCAmelCase = 1.0 - self.betas UpperCAmelCase = torch.cumprod(self.alphas ,dim=0 ) # set all values self.set_timesteps(A ,A ,A ) def _UpperCamelCase ( self ,A ,A=None ): if schedule_timesteps is None: UpperCAmelCase = self.timesteps UpperCAmelCase = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. for image-to-image) if len(self._index_counter ) == 0: UpperCAmelCase = 1 if len(A ) > 1 else 0 else: UpperCAmelCase = timestep.cpu().item() if torch.is_tensor(A ) else timestep UpperCAmelCase = self._index_counter[timestep_int] return indices[pos].item() @property def _UpperCamelCase ( self ): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def _UpperCamelCase ( self ,A ,A ,): UpperCAmelCase = self.index_for_timestep(A ) if self.state_in_first_order: UpperCAmelCase = self.sigmas[step_index] else: UpperCAmelCase = self.sigmas_interpol[step_index] UpperCAmelCase = sample / ((sigma**2 + 1) ** 0.5) return sample def _UpperCamelCase ( self ,A ,A = None ,A = None ,): UpperCAmelCase = num_inference_steps UpperCAmelCase = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. 
of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": UpperCAmelCase = np.linspace(0 ,num_train_timesteps - 1 ,A ,dtype=A )[::-1].copy() elif self.config.timestep_spacing == "leading": UpperCAmelCase = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 UpperCAmelCase = (np.arange(0 ,A ) * step_ratio).round()[::-1].copy().astype(A ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": UpperCAmelCase = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 UpperCAmelCase = (np.arange(A ,0 ,-step_ratio )).round().copy().astype(A ) timesteps -= 1 else: raise ValueError( F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' ) UpperCAmelCase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) UpperCAmelCase = torch.from_numpy(np.log(A ) ).to(A ) UpperCAmelCase = np.interp(A ,np.arange(0 ,len(A ) ) ,A ) UpperCAmelCase = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) UpperCAmelCase = torch.from_numpy(A ).to(device=A ) # interpolate sigmas UpperCAmelCase = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp() UpperCAmelCase = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) UpperCAmelCase = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(A ).startswith("""mps""" ): # mps does not support float64 UpperCAmelCase = torch.from_numpy(A ).to(A ,dtype=torch.floataa ) else: UpperCAmelCase = torch.from_numpy(A ).to(A ) # interpolate timesteps UpperCAmelCase = self.sigma_to_t(A ).to(A ,dtype=timesteps.dtype ) UpperCAmelCase = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten() 
UpperCAmelCase = torch.cat([timesteps[:1], interleaved_timesteps] ) UpperCAmelCase = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter UpperCAmelCase = defaultdict(A ) def _UpperCamelCase ( self ,A ): # get log sigma UpperCAmelCase = sigma.log() # get distribution UpperCAmelCase = log_sigma - self.log_sigmas[:, None] # get sigmas range UpperCAmelCase = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) UpperCAmelCase = low_idx + 1 UpperCAmelCase = self.log_sigmas[low_idx] UpperCAmelCase = self.log_sigmas[high_idx] # interpolate sigmas UpperCAmelCase = (low - log_sigma) / (low - high) UpperCAmelCase = w.clamp(0 ,1 ) # transform interpolation to time range UpperCAmelCase = (1 - w) * low_idx + w * high_idx UpperCAmelCase = t.view(sigma.shape ) return t @property def _UpperCamelCase ( self ): return self.sample is None def _UpperCamelCase ( self ,A ,A ,A ,A = True ,): UpperCAmelCase = self.index_for_timestep(A ) # advance index counter by 1 UpperCAmelCase = timestep.cpu().item() if torch.is_tensor(A ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: UpperCAmelCase = self.sigmas[step_index] UpperCAmelCase = self.sigmas_interpol[step_index + 1] UpperCAmelCase = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method UpperCAmelCase = self.sigmas[step_index - 1] UpperCAmelCase = self.sigmas_interpol[step_index] UpperCAmelCase = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API UpperCAmelCase = 0 UpperCAmelCase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": UpperCAmelCase = sigma_hat if self.state_in_first_order else sigma_interpol UpperCAmelCase = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": UpperCAmelCase = sigma_hat if self.state_in_first_order else sigma_interpol UpperCAmelCase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError("""prediction_type not implemented yet: sample""" ) else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order UpperCAmelCase = (sample - pred_original_sample) / sigma_hat # 3. delta timestep UpperCAmelCase = sigma_interpol - sigma_hat # store for 2nd order step UpperCAmelCase = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order UpperCAmelCase = (sample - pred_original_sample) / sigma_interpol # 3. 
delta timestep UpperCAmelCase = sigma_next - sigma_hat UpperCAmelCase = self.sample UpperCAmelCase = None UpperCAmelCase = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=A ) def _UpperCamelCase ( self ,A ,A ,A ,): # Make sure sigmas and timesteps have the same device and dtype as original_samples UpperCAmelCase = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(A ): # mps does not support float64 UpperCAmelCase = self.timesteps.to(original_samples.device ,dtype=torch.floataa ) UpperCAmelCase = timesteps.to(original_samples.device ,dtype=torch.floataa ) else: UpperCAmelCase = self.timesteps.to(original_samples.device ) UpperCAmelCase = timesteps.to(original_samples.device ) UpperCAmelCase = [self.index_for_timestep(A ,A ) for t in timesteps] UpperCAmelCase = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): UpperCAmelCase = sigma.unsqueeze(-1 ) UpperCAmelCase = original_samples + noise * sigma return noisy_samples def __len__( self ): return self.config.num_train_timesteps
703
"""simple docstring""" # Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position _UpperCamelCase = """2.13.1""" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("""3.7"""): raise ImportWarning( """To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition.""" ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( """To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n""" """If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.""" ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # 
isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip _UpperCamelCase = concatenate_datasets _UpperCamelCase = DownloadConfig _UpperCamelCase = DownloadManager _UpperCamelCase = DownloadMode _UpperCamelCase = DownloadConfig _UpperCamelCase = DownloadMode _UpperCamelCase = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
74
0
"""Configuration class for the GLPN (Global-Local Path Networks) depth model."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


_UpperCamelCase = logging.get_logger(__name__)

# NOTE(review): this second module-level binding clobbers the logger above —
# a mangling artifact (originally a separate archive-map constant). Kept under
# the same name to avoid changing the module's visible surface.
_UpperCamelCase = {
    """vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class lowerCamelCase__ ( PretrainedConfig ):
    """Stores the configuration of a GLPN model.

    Fixes applied: the base class was the undefined name ``snake_case`` (now the
    imported ``PretrainedConfig``); ``__init__`` had duplicated ``A`` parameters
    (a SyntaxError) and discarded every argument into a throwaway local — the
    arguments are now stored on the instance under the names they were
    originally read back as.
    """

    # Model-type identifier (``model_type`` equivalent under the mangled naming).
    SCREAMING_SNAKE_CASE = '''glpn'''

    def __init__(
        self,
        num_channels=3,              # input image channels
        num_encoder_blocks=4,        # number of encoder stages
        depths=[2, 2, 2, 2],         # transformer layers per stage (read-only defaults)
        sr_ratios=[8, 4, 2, 1],      # sequence-reduction ratio per stage
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,                # maximum predicted depth value
        head_in_index=-1,            # which encoder feature the head consumes
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_attention_heads = num_attention_heads
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
704
"""simple docstring""" def _a ( _snake_case ): """simple docstring""" if not isinstance(_snake_case , _snake_case ): raise ValueError("""Input must be an integer""" ) if input_num <= 0: raise ValueError("""Input must be positive""" ) return sum( divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
74
0
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def _a ( _snake_case , _snake_case ): """simple docstring""" UpperCAmelCase = XCLIPTextConfig() # derive patch size from model name UpperCAmelCase = model_name.find("""patch""" ) UpperCAmelCase = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] ) UpperCAmelCase = XCLIPVisionConfig(patch_size=_snake_case , num_frames=_snake_case ) if "large" in model_name: UpperCAmelCase = 768 UpperCAmelCase = 3072 UpperCAmelCase = 12 UpperCAmelCase = 1024 UpperCAmelCase = 4096 UpperCAmelCase = 16 UpperCAmelCase = 24 UpperCAmelCase = 768 UpperCAmelCase = 3072 if model_name == "xclip-large-patch14-16-frames": UpperCAmelCase = 336 UpperCAmelCase = XCLIPConfig.from_text_vision_configs(_snake_case , _snake_case ) if "large" in model_name: UpperCAmelCase = 768 return config def _a ( _snake_case ): """simple docstring""" if name == "token_embedding.weight": UpperCAmelCase = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" ) if name == "positional_embedding": UpperCAmelCase = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" ) if "ln_1" in name: UpperCAmelCase = name.replace("""ln_1""" , """layer_norm1""" ) if "ln_2" in name: UpperCAmelCase = name.replace("""ln_2""" , """layer_norm2""" ) if "c_fc" in name: UpperCAmelCase = name.replace("""c_fc""" , """fc1""" ) if "c_proj" in name: UpperCAmelCase = name.replace("""c_proj""" , """fc2""" ) if name.startswith("""transformer.resblocks""" ): UpperCAmelCase = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" ) if "attn.out_proj" in name and "message" not in name: UpperCAmelCase = name.replace("""attn.out_proj""" , 
"""self_attn.out_proj""" ) if "ln_final" in name: UpperCAmelCase = name.replace("""ln_final""" , """text_model.final_layer_norm""" ) # visual encoder if name == "visual.class_embedding": UpperCAmelCase = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" ) if name == "visual.positional_embedding": UpperCAmelCase = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" ) if name.startswith("""visual.transformer.resblocks""" ): UpperCAmelCase = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" ) if "visual.conv1" in name: UpperCAmelCase = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" ) if "visual.ln_pre" in name: UpperCAmelCase = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" ) if "visual.ln_post" in name: UpperCAmelCase = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" ) if "visual.proj" in name: UpperCAmelCase = name.replace("""visual.proj""" , """visual_projection.weight""" ) if "text_projection" in name: UpperCAmelCase = name.replace("""text_projection""" , """text_projection.weight""" ) # things on top if "prompts_visual_proj" in name: UpperCAmelCase = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" ) if "prompts_visual_ln" in name: UpperCAmelCase = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" ) # mit if name == "mit.positional_embedding": UpperCAmelCase = name.replace("""positional""" , """position""" ) if name.startswith("""mit.resblocks""" ): UpperCAmelCase = name.replace("""mit.resblocks""" , """mit.encoder.layers""" ) # prompts generator if name.startswith("""prompts_generator.norm""" ): UpperCAmelCase = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" ) return name def _a ( _snake_case , _snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): UpperCAmelCase = 
orig_state_dict.pop(_snake_case ) if "attn.in_proj" in key: UpperCAmelCase = key.split(""".""" ) if key.startswith("""visual""" ): UpperCAmelCase = key_split[3] UpperCAmelCase = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: UpperCAmelCase = val[ :dim, : ] UpperCAmelCase = val[ dim : dim * 2, : ] UpperCAmelCase = val[ -dim:, : ] else: UpperCAmelCase = val[ :dim ] UpperCAmelCase = val[ dim : dim * 2 ] UpperCAmelCase = val[ -dim: ] else: if "weight" in key: UpperCAmelCase = val[ :dim, : ] UpperCAmelCase = val[ dim : dim * 2, : ] UpperCAmelCase = val[ -dim:, : ] else: UpperCAmelCase = val[:dim] UpperCAmelCase = val[ dim : dim * 2 ] UpperCAmelCase = val[-dim:] elif key.startswith("""mit""" ): UpperCAmelCase = key_split[2] UpperCAmelCase = config.vision_config.mit_hidden_size if "weight" in key: UpperCAmelCase = val[:dim, :] UpperCAmelCase = val[dim : dim * 2, :] UpperCAmelCase = val[-dim:, :] else: UpperCAmelCase = val[:dim] UpperCAmelCase = val[dim : dim * 2] UpperCAmelCase = val[-dim:] else: UpperCAmelCase = key_split[2] UpperCAmelCase = config.text_config.hidden_size if "weight" in key: UpperCAmelCase = val[:dim, :] UpperCAmelCase = val[ dim : dim * 2, : ] UpperCAmelCase = val[-dim:, :] else: UpperCAmelCase = val[:dim] UpperCAmelCase = val[ dim : dim * 2 ] UpperCAmelCase = val[-dim:] else: UpperCAmelCase = rename_key(_snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: UpperCAmelCase = val.T UpperCAmelCase = val return orig_state_dict def _a ( _snake_case ): """simple docstring""" if num_frames == 8: UpperCAmelCase = """eating_spaghetti_8_frames.npy""" elif num_frames == 16: UpperCAmelCase = """eating_spaghetti.npy""" elif num_frames == 32: UpperCAmelCase = """eating_spaghetti_32_frames.npy""" UpperCAmelCase = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename=_snake_case , repo_type="""dataset""" , ) UpperCAmelCase = np.load(_snake_case ) return list(_snake_case ) def _a 
( _snake_case , _snake_case=None , _snake_case=False ): """simple docstring""" UpperCAmelCase = { # fully supervised kinetics-400 checkpoints """xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""", """xclip-base-patch32-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth""" ), """xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""", """xclip-base-patch16-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth""" ), """xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb""", """xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f""", # fully supervised kinetics-600 checkpoints """xclip-base-patch16-kinetics-600""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth""" ), """xclip-base-patch16-kinetics-600-16-frames""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth""" ), """xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be""", # few shot """xclip-base-patch16-hmdb-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth""" ), """xclip-base-patch16-hmdb-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth""" ), """xclip-base-patch16-hmdb-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth""" ), """xclip-base-patch16-hmdb-16-shot""": ( 
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth""" ), """xclip-base-patch16-ucf-2-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth""" ), """xclip-base-patch16-ucf-4-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth""" ), """xclip-base-patch16-ucf-8-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth""" ), """xclip-base-patch16-ucf-16-shot""": ( """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth""" ), # zero shot """xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""", } UpperCAmelCase = model_to_url[model_name] UpperCAmelCase = 8 if "16-frames" in model_name: UpperCAmelCase = 16 elif "shot" in model_name: UpperCAmelCase = 32 UpperCAmelCase = get_xclip_config(_snake_case , _snake_case ) UpperCAmelCase = XCLIPModel(_snake_case ) model.eval() if "drive" in checkpoint_url: UpperCAmelCase = """pytorch_model.bin""" gdown.cached_download(_snake_case , _snake_case , quiet=_snake_case ) UpperCAmelCase = torch.load(_snake_case , map_location="""cpu""" )["""model"""] else: UpperCAmelCase = torch.hub.load_state_dict_from_url(_snake_case )["""model"""] UpperCAmelCase = convert_state_dict(_snake_case , _snake_case ) UpperCAmelCase = XCLIPModel(_snake_case ) UpperCAmelCase , UpperCAmelCase = model.load_state_dict(_snake_case , strict=_snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() UpperCAmelCase = 336 if model_name == """xclip-large-patch14-16-frames""" else 224 UpperCAmelCase = VideoMAEImageProcessor(size=_snake_case ) UpperCAmelCase = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" ) UpperCAmelCase = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" ) UpperCAmelCase = XCLIPProcessor(image_processor=_snake_case , 
tokenizer=_snake_case ) UpperCAmelCase = prepare_video(_snake_case ) UpperCAmelCase = processor( text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=_snake_case , return_tensors="""pt""" , padding=_snake_case ) print("""Shape of pixel values:""" , inputs.pixel_values.shape ) with torch.no_grad(): UpperCAmelCase = model(**_snake_case ) # Verify outputs UpperCAmelCase = outputs.logits_per_video UpperCAmelCase = logits_per_video.softmax(dim=1 ) print("""Probs:""" , _snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": UpperCAmelCase = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": UpperCAmelCase = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] ) elif model_name == "xclip-base-patch16": UpperCAmelCase = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": UpperCAmelCase = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] ) elif model_name == "xclip-large-patch14": UpperCAmelCase = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": UpperCAmelCase = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": UpperCAmelCase = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": UpperCAmelCase = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": UpperCAmelCase = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": UpperCAmelCase = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": UpperCAmelCase = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": UpperCAmelCase = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] ) elif model_name == 
"xclip-base-patch16-hmdb-16-shot": UpperCAmelCase = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": UpperCAmelCase = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": UpperCAmelCase = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": UpperCAmelCase = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": UpperCAmelCase = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": UpperCAmelCase = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(_snake_case , _snake_case , atol=1E-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) if push_to_hub: print("""Pushing model, processor and slow tokenizer files to the hub...""" ) model.push_to_hub(_snake_case , organization="""nielsr""" ) processor.push_to_hub(_snake_case , organization="""nielsr""" ) slow_tokenizer.push_to_hub(_snake_case , organization="""nielsr""" ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) _UpperCamelCase = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
705
"""Pretrain a model on masked image modeling (SimMIM-style) with the 🤗 Trainer.

Random square patches of each input image are masked and the model is trained
to reconstruct the raw pixel values of the masked patches.
"""

import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    AutoModelForMaskedImageModeling,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version

logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to use for pretraining."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."
        },
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self) -> None:
        # Collect any local data folders into the `data_files` mapping expected by `load_dataset`.
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we pretrain from."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )


class MaskGenerator:
    """Generate a random boolean mask over image patches, SimMIM-style.

    The image is divided into (mask_patch_size x mask_patch_size) regions; a
    random subset of them (``mask_ratio``) is masked, then the mask is upsampled
    to the model's patch resolution.

    Returns (via ``__call__``) a 1-D torch tensor of 0/1 of length
    (input_size // model_patch_size) ** 2.
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        # Number of mask patches along one side, and the upsampling factor from
        # mask patches to model patches.
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        # Upsample each mask patch to cover scale x scale model patches.
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())


def collate_fn(examples):
    """Stack pixel values and boolean masks of a list of examples into a batch."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}


def main():
    # See all possible arguments by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Preprocess a batch of images: apply transforms + create a random mask."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
74
0
"""Feature extractor for SpeechT5: raw waveform inputs and log-mel spectrogram targets."""

import warnings
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging

logger = logging.get_logger(__name__)


class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    """Constructs a SpeechT5 feature extractor.

    Processes raw speech into `input_values` (optionally zero-mean/unit-variance
    normalized) and/or target speech into log-mel spectrogram `input_values`.
    """

    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16_000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7_600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        # hop/window lengths are given in milliseconds; convert to samples.
        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Normalize each array to zero mean and unit variance, ignoring padded positions."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    # keep the padded tail at the padding value so it stays inert
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def _extract_mel_features(
        self,
        one_waveform: np.ndarray,
    ) -> np.ndarray:
        """Extract log-mel filter bank features for one waveform, shape (frames, num_mel_bins)."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T

    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize `audio` (waveform inputs) and/or `audio_target` (spectrogram labels).

        Raises:
            ValueError: if neither input is provided, or if `sampling_rate` disagrees
                with the extractor's configured sampling rate.
        """
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                # merge the target features into the input batch under `labels`
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def _process_audio(
        self,
        speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        """Convert, pad and (for inputs) normalize a mono waveform or batch of waveforms."""
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
706
"""Pretrain a Wav2Vec2 model on unlabeled audio with the contrastive objective."""

import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union

import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn

from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices

if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config we are going to pretrain."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )


def configure_logger(model_args, training_args):
    """Configure root logging level: DEBUG if verbose, INFO on the main process, else WARNING."""
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to use for pretraining."""

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )


@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """Pad a batch of raw-audio features and sample the time indices to mask.

    Attributes:
        model: the Wav2Vec2 model, needed to map raw-sample lengths to feature lengths.
        feature_extractor: the feature extractor used for padding.
        padding / max_length / pad_to_multiple_of: forwarded to `feature_extractor.pad`.
    """

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])

        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )

            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )

            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch


class Wav2Vec2PreTrainer(Trainer):
    """Trainer subclass that decays the Gumbel-softmax temperature after every step."""

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """Perform one training step (forward, loss scaling, backward) and decay the temperature."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()


def main():
    # See all possible arguments by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], batch["sampling_rate"] = librosa.load(
            batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate
        )
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
74
0
"""simple docstring""" import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _UpperCamelCase : Optional[int] = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp _UpperCamelCase : int = 5 _UpperCamelCase : List[Any] = 10 @require_sentencepiece @require_tokenizers class lowerCamelCase__ ( snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE = SpeechaTextTokenizer SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = True def _UpperCamelCase ( self ): super().setUp() UpperCAmelCase = sp.SentencePieceProcessor() spm_model.Load(A ) UpperCAmelCase = ["""<s>""", """<pad>""", """</s>""", """<unk>"""] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(A ) )] UpperCAmelCase = dict(zip(A ,range(len(A ) ) ) ) UpperCAmelCase = Path(self.tmpdirname ) save_json(A ,save_dir / VOCAB_FILES_NAMES["""vocab_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(A ,save_dir / VOCAB_FILES_NAMES["""spm_file"""] ) UpperCAmelCase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCamelCase ( self ): UpperCAmelCase = """<pad>""" UpperCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) ,A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) ,A ) def _UpperCamelCase ( self ): UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""<s>""" ) self.assertEqual(vocab_keys[1] ,"""<pad>""" ) self.assertEqual(vocab_keys[-1] ,"""j""" ) self.assertEqual(len(A 
) ,1_001 ) def _UpperCamelCase ( self ): self.assertEqual(self.get_tokenizer().vocab_size ,1_001 ) def _UpperCamelCase ( self ): UpperCAmelCase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) UpperCAmelCase = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A ) ,[289, 50, 14, 174, 386] ,) UpperCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A ,[SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] ,) UpperCAmelCase = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual(A ,[12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) UpperCAmelCase = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A ,[SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] ,) @slow def _UpperCamelCase ( self ): # fmt: off UpperCAmelCase = {"""input_ids""": [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 
511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A ,model_name="""facebook/s2t-small-mustc-en-de-st""" ,revision="""a14f04cf0776c02f62a8cb800cf7909e15ea23ad""" ,) @require_sentencepiece class lowerCamelCase__ ( unittest.TestCase ): SCREAMING_SNAKE_CASE = '''valhalla/s2t_mustc_multilinguial_medium''' SCREAMING_SNAKE_CASE = '''C\'est trop cool''' SCREAMING_SNAKE_CASE = '''Esto es genial''' @classmethod def _UpperCamelCase ( cls ): UpperCAmelCase = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def 
_UpperCamelCase ( self ): self.assertEqual(self.tokenizer.lang_code_to_id["""pt"""] ,4 ) self.assertEqual(self.tokenizer.lang_code_to_id["""ru"""] ,6 ) self.assertEqual(self.tokenizer.lang_code_to_id["""it"""] ,9 ) self.assertEqual(self.tokenizer.lang_code_to_id["""de"""] ,11 ) def _UpperCamelCase ( self ): self.assertEqual(self.tokenizer.vocab_size ,10_000 ) def _UpperCamelCase ( self ): self.assertIn(A ,self.tokenizer.all_special_ids ) UpperCAmelCase = [ES_CODE, 4, 1_601, 47, 7_647, 2] UpperCAmelCase = self.tokenizer.decode(A ,skip_special_tokens=A ) UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=A ) self.assertEqual(A ,A ) self.assertNotIn(self.tokenizer.eos_token ,A ) def _UpperCamelCase ( self ): UpperCAmelCase = """fr""" UpperCAmelCase = self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0] ,A ) self.assertEqual(encoded[-1] ,self.tokenizer.eos_token_id ) def _UpperCamelCase ( self ): UpperCAmelCase = """fr""" self.assertListEqual(self.tokenizer.prefix_tokens ,[FR_CODE] ) UpperCAmelCase = """es""" self.assertListEqual(self.tokenizer.prefix_tokens ,[ES_CODE] )
707
"""simple docstring""" from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class lowerCamelCase__ : def __init__( self ,A ,): UpperCAmelCase = parent UpperCAmelCase = 13 UpperCAmelCase = 7 UpperCAmelCase = 30 UpperCAmelCase = self.seq_length + self.mem_len UpperCAmelCase = 15 UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = 99 UpperCAmelCase = [10, 50, 80] UpperCAmelCase = 32 UpperCAmelCase = 32 UpperCAmelCase = 4 UpperCAmelCase = 8 UpperCAmelCase = 128 UpperCAmelCase = 2 UpperCAmelCase = 2 UpperCAmelCase = None UpperCAmelCase = 1 UpperCAmelCase = 0 UpperCAmelCase = 3 UpperCAmelCase = self.vocab_size - 1 UpperCAmelCase = 0.01 def _UpperCamelCase ( self ): UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase = None if self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase = TransfoXLConfig( vocab_size=self.vocab_size ,mem_len=self.mem_len ,clamp_len=self.clamp_len ,cutoffs=self.cutoffs ,d_model=self.hidden_size ,d_embed=self.d_embed ,n_head=self.num_attention_heads ,d_head=self.d_head ,d_inner=self.d_inner ,div_val=self.div_val ,n_layer=self.num_hidden_layers ,eos_token_id=self.eos_token_id ,pad_token_id=self.vocab_size - 1 ,init_range=self.init_range ,num_labels=self.num_labels ,) return (config, input_ids_a, input_ids_a, lm_labels) def _UpperCamelCase ( self ): 
random.seed(self.seed ) tf.random.set_seed(self.seed ) def _UpperCamelCase ( self ,A ,A ,A ,A ): UpperCAmelCase = TFTransfoXLModel(A ) UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple() UpperCAmelCase = {"""input_ids""": input_ids_a, """mems""": mems_a} UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple() self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) def _UpperCamelCase ( self ,A ,A ,A ,A ): UpperCAmelCase = TFTransfoXLLMHeadModel(A ) UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple() UpperCAmelCase = {"""input_ids""": input_ids_a, """labels""": lm_labels} UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple() UpperCAmelCase , UpperCAmelCase = model([input_ids_a, mems_a] ).to_tuple() UpperCAmelCase = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels} UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple() self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) def _UpperCamelCase ( self ,A ,A ,A ,A ): UpperCAmelCase = TFTransfoXLForSequenceClassification(A ) UpperCAmelCase = model(A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _UpperCamelCase ( self 
): UpperCAmelCase = self.prepare_config_and_inputs() ((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) = config_and_inputs UpperCAmelCase = {"""input_ids""": input_ids_a} return config, inputs_dict @require_tf class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) SCREAMING_SNAKE_CASE = () if is_tf_available() else () SCREAMING_SNAKE_CASE = ( { '''feature-extraction''': TFTransfoXLModel, '''text-classification''': TFTransfoXLForSequenceClassification, '''text-generation''': TFTransfoXLLMHeadModel, '''zero-shot''': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def _UpperCamelCase ( self ,A ,A ,A ,A ,A ): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def _UpperCamelCase ( self ): UpperCAmelCase = TFTransfoXLModelTester(self ) UpperCAmelCase = ConfigTester(self ,config_class=A ,d_embed=37 ) def _UpperCamelCase ( self ): self.config_tester.run_common_tests() def _UpperCamelCase ( self ): self.model_tester.set_seed() UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*A ) def _UpperCamelCase ( self ): self.model_tester.set_seed() UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*A ) def _UpperCamelCase ( self ): UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: UpperCAmelCase = model_class(A ) assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: UpperCAmelCase = model.get_output_embeddings() assert isinstance(A ,tf.keras.layers.Layer ) UpperCAmelCase = model.get_bias() assert name is None else: UpperCAmelCase = model.get_output_embeddings() assert x is None UpperCAmelCase = model.get_bias() assert name is None def _UpperCamelCase ( self ): # TODO JP: Make TransfoXL XLA compliant pass @slow def _UpperCamelCase ( self ): for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase = TFTransfoXLModel.from_pretrained(A ) self.assertIsNotNone(A ) @unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" ) def _UpperCamelCase ( self ): pass @require_tf class lowerCamelCase__ ( unittest.TestCase ): @unittest.skip("""Skip test until #12651 is resolved.""" ) @slow def _UpperCamelCase ( self ): UpperCAmelCase = 
TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" ) # fmt: off UpperCAmelCase = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] ,dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . 
<eod> </s> <eos> # fmt: off UpperCAmelCase = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> UpperCAmelCase = model.generate(A ,max_length=200 ,do_sample=A ) self.assertListEqual(output_ids[0].numpy().tolist() ,A )
74
0
"""MRA (Multi-Resolution Attention) model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


_UpperCamelCase = logging.get_logger(__name__)

# Map of canonical checkpoints to their hosted config files.
# NOTE: in the original this dict was assigned to ``_UpperCamelCase`` as well,
# silently clobbering the logger above; it now has its own name.
MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class lowerCamelCase__(PretrainedConfig):
    """Configuration class holding the hyper-parameters of an MRA model.

    Defaults reproduce the ``uw-madison/mra-base-512-4`` architecture.
    The original declared every ``__init__`` parameter as ``A`` (a duplicate
    argument name, i.e. a SyntaxError) and inherited from the undefined name
    ``snake_case``; both are fixed here while keeping the class name and the
    ``SCREAMING_SNAKE_CASE`` model-type attribute unchanged.
    """

    # model_type identifier used for config auto-detection
    SCREAMING_SNAKE_CASE = """mra"""

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        # Token ids are consumed by the base class; everything else is stored
        # verbatim on the config instance.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # MRA-specific attention-approximation knobs.
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
708
"""simple docstring""" from math import sqrt def _a ( _snake_case = 100_0000 ): """simple docstring""" UpperCAmelCase = 0 UpperCAmelCase = 0 UpperCAmelCase = 42 while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(_snake_case , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(F"""{solution() = }""")
74
0
"""Image processor for video models (resize / crop / rescale / normalize)."""
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging


if is_vision_available():
    import PIL

_UpperCamelCase = logging.get_logger(__name__)


def make_batched(videos):
    """Normalize ``videos`` to a list of videos, each a list of frames.

    Accepts a single image, a list of frames, or a list of videos.
    (Originally named ``_a`` even though the processor below calls
    ``make_batched``.)
    """
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


class lowerCamelCase__(BaseImageProcessor):
    """Video image processor: optional shortest-edge resize, center crop,
    rescale (with optional offset to a symmetric range) and normalization.

    The original defined every parameter as ``A`` (duplicate argument names,
    a SyntaxError), named every method ``_UpperCamelCase`` while the bodies
    call ``self.resize`` etc., and inherited from the undefined ``snake_case``;
    all restored here from the calls the code itself makes.
    """

    # Keys produced by ``preprocess`` (model input names).
    SCREAMING_SNAKE_CASE = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        offset=True,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        """Resize to ``size`` — either a shortest-edge target or explicit height/width."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop to an explicit height/width."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, offset=True, data_format=None, **kwargs):
        """Rescale pixel values by ``scale``, optionally offsetting first.

        NOTE(review): the offset subtracts ``scale / 2`` *before* rescaling,
        which looks suspicious (an offset to a symmetric range would normally
        subtract half the value range); preserved as-is — confirm against the
        upstream processor before changing.
        """
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Channel-wise normalize with ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        offset=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        data_format=ChannelDimension.FIRST,
    ):
        """Apply the configured transformations to a single frame."""
        # FIX: the original condition was ``do_resize and size is None or
        # resample is None`` which, by precedence, raised whenever resample
        # was None even with do_resize=False.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        offset=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one or more videos into a ``BatchFeature`` of pixel values.

        Per-call arguments override the defaults configured in ``__init__``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
709
"""Convert an original CLAP checkpoint into the transformers ClapModel format."""
import argparse
import re

import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel


# Fragments of original CLAP state-dict keys mapped to their transformers
# equivalents. (Originally bound to ``_UpperCamelCase`` although the code
# below reads ``KEYS_TO_MODIFY_MAPPING``.)
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")


def init_clap(checkpoint_path, enable_fusion=False):
    """Build the original CLAP model from ``checkpoint_path``.

    Returns the (model, model_cfg) pair produced by ``CLAP.create_model``.
    """
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    """Rewrite original CLAP state-dict keys to transformers names.

    Applies the fragment mapping, renumbers ``sequential.N`` projection layers,
    and splits fused audio qkv weights into separate query/key/value tensors.
    """
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
        # FIX: the original tested ``"audio" and "qkv" in key`` which, since a
        # non-empty literal is always truthy, matched *every* qkv key.
        if "audio" in key and "qkv" in key:
            # split fused qkv into query, key and value tensors
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    """Load, rename and save a CLAP checkpoint as a transformers ClapModel.

    ``config_path`` is accepted for CLI compatibility but unused: a default
    ``ClapConfig`` is built and saved alongside the model.
    """
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    # presumably this flag lives on the audio sub-config — TODO confirm
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
74
0
"""simple docstring""" _UpperCamelCase = """Input must be a string of 8 numbers plus letter""" _UpperCamelCase = """TRWAGMYFPDXBNJZSQVHLCKE""" def _a ( _snake_case ): """simple docstring""" if not isinstance(_snake_case , _snake_case ): UpperCAmelCase = F'''Expected string as input, found {type(_snake_case ).__name__}''' raise TypeError(_snake_case ) UpperCAmelCase = spanish_id.replace("""-""" , """""" ).upper() if len(_snake_case ) != 9: raise ValueError(_snake_case ) try: UpperCAmelCase = int(spanish_id_clean[0:8] ) UpperCAmelCase = spanish_id_clean[8] except ValueError as ex: raise ValueError(_snake_case ) from ex if letter.isdigit(): raise ValueError(_snake_case ) return letter == LOOKUP_LETTERS[number % 23] if __name__ == "__main__": import doctest doctest.testmod()
710
"""tests directory-specific settings - this file is run automatically by pytest
before any tests are run."""
import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# (Originally the path was assigned to ``_UpperCamelCase`` while the insert
# below read the undefined name ``git_repo_path``.)
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    """pytest hook: register the shared diffusers command-line options."""
    # Imported lazily so collecting this conftest does not itself require the
    # package import to succeed.
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """pytest hook: emit the extended report files when --make-reports is set."""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
74
0
"""simple docstring""" import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class lowerCamelCase__ : def __init__( self ,A ,A=13 ,A=7 ,A=True ,A=True ,A=False ,A=True ,A=99 ,A=32 ,A=5 ,A=4 ,A=37 ,A="gelu" ,A=0.1 ,A=0.1 ,A=512 ,A=16 ,A=2 ,A=0.02 ,A=3 ,A=4 ,A=None ,): UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = seq_length UpperCAmelCase = is_training UpperCAmelCase = use_input_mask UpperCAmelCase = use_token_type_ids UpperCAmelCase = use_labels UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = type_vocab_size UpperCAmelCase = type_sequence_label_size UpperCAmelCase = initializer_range UpperCAmelCase = num_labels UpperCAmelCase = num_choices UpperCAmelCase = scope def _UpperCamelCase ( self ): UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase = None if self.use_input_mask: UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase = None if self.use_token_type_ids: UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = None if self.use_labels: UpperCAmelCase = 
ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) UpperCAmelCase = ids_tensor([self.batch_size] ,self.num_choices ) UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCamelCase ( self ): return OpenLlamaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A ,initializer_range=self.initializer_range ,use_stable_embedding=A ,) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ): UpperCAmelCase = OpenLlamaModel(config=A ) model.to(A ) model.eval() UpperCAmelCase = model(A ,attention_mask=A ) UpperCAmelCase = model(A ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = True UpperCAmelCase = OpenLlamaModel(A ) model.to(A ) model.eval() UpperCAmelCase = model( A ,attention_mask=A ,encoder_hidden_states=A ,encoder_attention_mask=A ,) UpperCAmelCase = model( A ,attention_mask=A ,encoder_hidden_states=A ,) UpperCAmelCase = model(A ,attention_mask=A ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = OpenLlamaForCausalLM(config=A ) model.to(A ) model.eval() UpperCAmelCase = model(A ,attention_mask=A ,labels=A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A 
,A ,A ,A ,A ,): UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = OpenLlamaForCausalLM(config=A ) model.to(A ) model.eval() # first forward pass UpperCAmelCase = model( A ,attention_mask=A ,encoder_hidden_states=A ,encoder_attention_mask=A ,use_cache=A ,) UpperCAmelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase = ids_tensor((self.batch_size, 3) ,config.vocab_size ) UpperCAmelCase = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and UpperCAmelCase = torch.cat([input_ids, next_tokens] ,dim=-1 ) UpperCAmelCase = torch.cat([input_mask, next_mask] ,dim=-1 ) UpperCAmelCase = model( A ,attention_mask=A ,encoder_hidden_states=A ,encoder_attention_mask=A ,output_hidden_states=A ,)["""hidden_states"""][0] UpperCAmelCase = model( A ,attention_mask=A ,encoder_hidden_states=A ,encoder_attention_mask=A ,past_key_values=A ,output_hidden_states=A ,)["""hidden_states"""][0] # select random slice UpperCAmelCase = ids_tensor((1,) ,output_from_past.shape[-1] ).item() UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A ,A ,atol=1e-3 ) ) def _UpperCamelCase ( self ): UpperCAmelCase = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) = config_and_inputs UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCamelCase__ ( snake_case , snake_case , snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) 
SCREAMING_SNAKE_CASE = (OpenLlamaForCausalLM,) if is_torch_available() else () SCREAMING_SNAKE_CASE = ( { '''feature-extraction''': OpenLlamaModel, '''text-classification''': OpenLlamaForSequenceClassification, '''text-generation''': OpenLlamaForCausalLM, '''zero-shot''': OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def _UpperCamelCase ( self ): UpperCAmelCase = OpenLlamaModelTester(self ) UpperCAmelCase = ConfigTester(self ,config_class=A ,hidden_size=37 ) def _UpperCamelCase ( self ): self.config_tester.run_common_tests() def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase = type self.model_tester.create_and_check_model(*A ) def _UpperCamelCase ( self ): UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase = 3 UpperCAmelCase = input_dict["""input_ids"""] UpperCAmelCase = input_ids.ne(1 ).to(A ) UpperCAmelCase = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) UpperCAmelCase = OpenLlamaForSequenceClassification(A ) model.to(A ) model.eval() UpperCAmelCase = model(A ,attention_mask=A ,labels=A ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _UpperCamelCase ( self ): UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase = 3 UpperCAmelCase = """single_label_classification""" UpperCAmelCase = input_dict["""input_ids"""] UpperCAmelCase = input_ids.ne(1 ).to(A ) UpperCAmelCase = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) UpperCAmelCase = OpenLlamaForSequenceClassification(A ) model.to(A ) 
model.eval() UpperCAmelCase = model(A ,attention_mask=A ,labels=A ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _UpperCamelCase ( self ): UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase = 3 UpperCAmelCase = """multi_label_classification""" UpperCAmelCase = input_dict["""input_ids"""] UpperCAmelCase = input_ids.ne(1 ).to(A ) UpperCAmelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCAmelCase = OpenLlamaForSequenceClassification(A ) model.to(A ) model.eval() UpperCAmelCase = model(A ,attention_mask=A ,labels=A ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" ) def _UpperCamelCase ( self ): pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def _UpperCamelCase ( self ,A ): UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase = ids_tensor([1, 10] ,config.vocab_size ) UpperCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase = OpenLlamaModel(A ) original_model.to(A ) original_model.eval() UpperCAmelCase = original_model(A ).last_hidden_state UpperCAmelCase = original_model(A ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase = {"""type""": scaling_type, """factor""": 10.0} UpperCAmelCase = OpenLlamaModel(A ) scaled_model.to(A ) scaled_model.eval() UpperCAmelCase = scaled_model(A ).last_hidden_state UpperCAmelCase = scaled_model(A ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # 
maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A ,A ,atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A ,A ,atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A ,A ,atol=1e-5 ) )
711
"""simple docstring"""

from __future__ import annotations

from collections.abc import MutableSequence


class lowerCamelCase__:
    """A dense univariate polynomial.

    ``coefficients[i]`` holds the coefficient of ``x**i``, so a polynomial of
    degree ``n`` is stored as exactly ``n + 1`` coefficients.

    NOTE(review): the three ``_UpperCamelCase`` methods below (evaluate,
    derivative, integral) all share one name, so only the last definition
    (the antiderivative) is reachable on instances.  The duplicate names are
    preserved here to keep the visible interface unchanged, but they should
    be given distinct names (``evaluate`` / ``derivative`` / ``integral``).
    """

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Store `degree` and a defensive copy of `coefficients`.

        Raises:
            ValueError: if ``len(coefficients) != degree + 1``.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        # FIX: the original bound both values to a throwaway local, so the
        # instance attributes every other method reads were never set.
        self.coefficients = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: lowerCamelCase__) -> lowerCamelCase__:
        """Return the sum of the two polynomials; neither operand is mutated."""
        # Copy the higher-degree operand, then add the shorter one into it.
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return lowerCamelCase__(self.degree, coefficients)
        coefficients = polynomial_a.coefficients[:]
        for i in range(self.degree + 1):
            coefficients[i] += self.coefficients[i]
        return lowerCamelCase__(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: lowerCamelCase__) -> lowerCamelCase__:
        """Return ``self - other``, implemented as ``self + (-1) * other``."""
        return self + polynomial_a * lowerCamelCase__(0, [-1])

    def __neg__(self) -> lowerCamelCase__:
        """Return the additive inverse (every coefficient negated)."""
        return lowerCamelCase__(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: lowerCamelCase__) -> lowerCamelCase__:
        """Return the product via schoolbook coefficient convolution."""
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return lowerCamelCase__(self.degree + polynomial_a.degree, coefficients)

    def _UpperCamelCase(self, substitution: float) -> float:
        """Evaluate the polynomial at ``substitution``."""
        # FIX: the original initialised a throwaway local and then updated the
        # undefined name `result`; accumulate into a single variable.
        result: float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        """Render as e.g. ``3x^2 - 2x + 1``; zero terms are omitted."""
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                # Leading positive term gets no sign prefix.
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                # FIX: the exponent printed here was an undefined name in the
                # original; it is the current power `i`.
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def _UpperCamelCase(self) -> lowerCamelCase__:  # noqa: F811 -- see class NOTE
        """Return the first derivative (currently shadowed; see class NOTE)."""
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return lowerCamelCase__(self.degree - 1, coefficients)

    def _UpperCamelCase(self, constant: float = 0) -> lowerCamelCase__:  # noqa: F811
        """Return the antiderivative, with integration constant ``constant``."""
        coefficients = [0.0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return lowerCamelCase__(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        """Polynomials are equal iff degree and every coefficient match."""
        if not isinstance(polynomial_a, lowerCamelCase__):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
74
0
"""simple docstring"""

import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer

# FIX: the original bound all four lookup tables below to the same module
# name (`_UpperCamelCase`), so only the last survived and every class-level
# reference (VOCAB_FILES_NAMES, ...) was undefined.  Each table now has its
# own conventional name.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
        ),
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-generator": (
            "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/electra-small-generator": {"do_lower_case": True},
    "google/electra-base-generator": {"do_lower_case": True},
    "google/electra-large-generator": {"do_lower_case": True},
    "google/electra-small-discriminator": {"do_lower_case": True},
    "google/electra-base-discriminator": {"do_lower_case": True},
    "google/electra-large-discriminator": {"do_lower_case": True},
}


class lowerCamelCase__(PreTrainedTokenizerFast):
    """Fast (Rust-backed) ELECTRA tokenizer.

    NOTE(review): the obfuscated source gave all five class attributes one
    name and all three methods one name, which is non-functional (later
    bindings shadow earlier ones).  Attribute and method names are restored
    to the hook names the `PreTrainedTokenizerFast` base class dispatches on.
    """

    # FIX: five class attributes were all named SCREAMING_SNAKE_CASE in the
    # original, so only the last assignment survived.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        # FIX: the original signature repeated the parameter name `A`, which
        # is a SyntaxError; distinct names restored from the super() call.
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-synchronise the backend normalizer if its serialized state
        # disagrees with the arguments passed here.
        # FIX: the parsed state was bound to a throwaway local while the code
        # below read the undefined name `normalizer_state`.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars)
            != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``[CLS] A [SEP]`` for one sequence, ``[CLS] A [SEP] B [SEP]`` for a pair."""
        # FIX: the original built the list into a throwaway local and then
        # extended/returned the undefined name `output`.
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(
        self, save_directory: str, filename_prefix: Optional[str] = None
    ) -> Tuple[str]:
        """Save the backend vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
712
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class lowerCamelCase__ : def __init__( self ,A ,): UpperCAmelCase = parent UpperCAmelCase = 13 UpperCAmelCase = 7 UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = 2 UpperCAmelCase = 99 UpperCAmelCase = 0 UpperCAmelCase = 32 UpperCAmelCase = 2 UpperCAmelCase = 4 UpperCAmelCase = 0.1 UpperCAmelCase = 0.1 UpperCAmelCase = 512 UpperCAmelCase = 16 UpperCAmelCase = 2 UpperCAmelCase = 0.02 UpperCAmelCase = 3 UpperCAmelCase = 4 UpperCAmelCase = """last""" UpperCAmelCase = True UpperCAmelCase = None UpperCAmelCase = 0 def _UpperCamelCase ( self ): UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ,dtype=tf.floataa ) UpperCAmelCase = None if self.use_input_lengths: UpperCAmelCase = ( ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length UpperCAmelCase = None if self.use_token_type_ids: UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs ) UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = None if 
self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) UpperCAmelCase = ids_tensor([self.batch_size] ,2 ,dtype=tf.floataa ) UpperCAmelCase = ids_tensor([self.batch_size] ,self.num_choices ) UpperCAmelCase = FlaubertConfig( vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,bos_token_id=self.bos_token_id ,) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = TFFlaubertModel(config=A ) UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids} UpperCAmelCase = model(A ) UpperCAmelCase = [input_ids, input_mask] UpperCAmelCase = model(A ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = TFFlaubertWithLMHeadModel(A ) UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids} UpperCAmelCase = model(A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = TFFlaubertForQuestionAnsweringSimple(A ) UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths} 
UpperCAmelCase = model(A ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = TFFlaubertForSequenceClassification(A ) UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths} UpperCAmelCase = model(A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = self.num_labels UpperCAmelCase = TFFlaubertForTokenClassification(config=A ) UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} UpperCAmelCase = model(A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = self.num_choices UpperCAmelCase = TFFlaubertForMultipleChoice(config=A ) UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) ) UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) ) UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) ) UpperCAmelCase = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } UpperCAmelCase = model(A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _UpperCamelCase ( self ): UpperCAmelCase = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) = config_and_inputs UpperCAmelCase = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """langs""": token_type_ids, """lengths""": 
input_lengths, } return config, inputs_dict @require_tf class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable SCREAMING_SNAKE_CASE = ( { '''feature-extraction''': TFFlaubertModel, '''fill-mask''': TFFlaubertWithLMHeadModel, '''question-answering''': TFFlaubertForQuestionAnsweringSimple, '''text-classification''': TFFlaubertForSequenceClassification, '''token-classification''': TFFlaubertForTokenClassification, '''zero-shot''': TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def _UpperCamelCase ( self ,A ,A ,A ,A ,A ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _UpperCamelCase ( self ): UpperCAmelCase = TFFlaubertModelTester(self ) UpperCAmelCase = ConfigTester(self ,config_class=A ,emb_dim=37 ) def _UpperCamelCase ( self ): self.config_tester.run_common_tests() def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*A ) @slow def _UpperCamelCase ( self ): for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase = TFFlaubertModel.from_pretrained(A ) self.assertIsNotNone(A ) @require_tf @require_sentencepiece @require_tokenizers class lowerCamelCase__ ( unittest.TestCase ): @slow def _UpperCamelCase ( self ): UpperCAmelCase = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" ) UpperCAmelCase = tf.convert_to_tensor( [[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] ,dtype=tf.intaa ,) # "J'aime flaubert !" 
UpperCAmelCase = model(A )[0] UpperCAmelCase = tf.TensorShape((1, 8, 512) ) self.assertEqual(output.shape ,A ) # compare the actual values for a slice. UpperCAmelCase = tf.convert_to_tensor( [ [ [-1.8768773, -1.566555, 0.27072418], [-1.6920038, -0.5873505, 1.9329599], [-2.9563985, -1.6993835, 1.7972052], ] ] ,dtype=tf.floataa ,) self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
74
0
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version _UpperCamelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") _UpperCamelCase = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) _UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCamelCase__ : SCREAMING_SNAKE_CASE = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''The column name of the images in the files. 
If not set, will try to use \'image\' or \'img\'.'''} , ) SCREAMING_SNAKE_CASE = field(default=snake_case , metadata={'''help''': '''A folder containing the training data.'''} ) SCREAMING_SNAKE_CASE = field(default=snake_case , metadata={'''help''': '''A folder containing the validation data.'''} ) SCREAMING_SNAKE_CASE = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) SCREAMING_SNAKE_CASE = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) SCREAMING_SNAKE_CASE = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def _UpperCamelCase ( self ): UpperCAmelCase = {} if self.train_dir is not None: UpperCAmelCase = self.train_dir if self.validation_dir is not None: UpperCAmelCase = self.validation_dir UpperCAmelCase = data_files if data_files else None @dataclass class lowerCamelCase__ : SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case )} , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) SCREAMING_SNAKE_CASE = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) SCREAMING_SNAKE_CASE = field(default=snake_case , metadata={'''help''': '''Name or path of preprocessor config.'''} ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={ '''help''': ( '''The size (resolution) of each patch. 
If not specified, will use `patch_size` of the configuration.''' ) } , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class lowerCamelCase__ : def __init__( self ,A=192 ,A=32 ,A=4 ,A=0.6 ): UpperCAmelCase = input_size UpperCAmelCase = mask_patch_size UpperCAmelCase = model_patch_size UpperCAmelCase = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError("""Input size must be divisible by mask patch size""" ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError("""Mask patch size must be divisible by model patch size""" ) UpperCAmelCase = self.input_size // self.mask_patch_size UpperCAmelCase = self.mask_patch_size // self.model_patch_size UpperCAmelCase = self.rand_size**2 UpperCAmelCase = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self ): UpperCAmelCase = np.random.permutation(self.token_count )[: self.mask_count] UpperCAmelCase = np.zeros(self.token_count ,dtype=A ) UpperCAmelCase = 1 UpperCAmelCase = mask.reshape((self.rand_size, self.rand_size) ) UpperCAmelCase = mask.repeat(self.scale ,axis=0 ).repeat(self.scale ,axis=1 ) return torch.tensor(mask.flatten() ) def _a ( _snake_case ) -> str: """simple docstring""" UpperCAmelCase = torch.stack([example["""pixel_values"""] for example in examples] ) UpperCAmelCase = torch.stack([example["""mask"""] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def _a ( ) -> Optional[Any]: """simple docstring""" UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mim""" , _snake_case , _snake_case ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(_snake_case ) transformers.utils.logging.set_verbosity(_snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. 
''' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. UpperCAmelCase = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _snake_case ) and data_args.train_val_split > 0.0: UpperCAmelCase = ds["""train"""].train_test_split(data_args.train_val_split ) UpperCAmelCase = split["""train"""] UpperCAmelCase = split["""test"""] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
UpperCAmelCase = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name_or_path: UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name_or_path , **_snake_case ) elif model_args.model_name_or_path: UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(_snake_case , """decoder_type""" ): UpperCAmelCase = """simmim""" # adapt config UpperCAmelCase = model_args.image_size if model_args.image_size is not None else config.image_size UpperCAmelCase = model_args.patch_size if model_args.patch_size is not None else config.patch_size UpperCAmelCase = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { """image_size""": model_args.image_size, """patch_size""": model_args.patch_size, """encoder_stride""": model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **_snake_case ) elif model_args.model_name_or_path: UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: UpperCAmelCase = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } UpperCAmelCase = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: UpperCAmelCase = 
AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) UpperCAmelCase = AutoModelForMaskedImageModeling.from_config(_snake_case ) if training_args.do_train: UpperCAmelCase = ds["""train"""].column_names else: UpperCAmelCase = ds["""validation"""].column_names if data_args.image_column_name is not None: UpperCAmelCase = data_args.image_column_name elif "image" in column_names: UpperCAmelCase = """image""" elif "img" in column_names: UpperCAmelCase = """img""" else: UpperCAmelCase = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py UpperCAmelCase = Compose( [ Lambda(lambda _snake_case : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator UpperCAmelCase = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(_snake_case ): UpperCAmelCase = [transforms(_snake_case ) for image in examples[image_column_name]] UpperCAmelCase = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: UpperCAmelCase = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms 
ds["train"].set_transform(_snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: UpperCAmelCase = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_snake_case ) # Initialize our trainer UpperCAmelCase = Trainer( model=_snake_case , args=_snake_case , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , ) # Training if training_args.do_train: UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCAmelCase = last_checkpoint UpperCAmelCase = trainer.train(resume_from_checkpoint=_snake_case ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: UpperCAmelCase = trainer.evaluate() trainer.log_metrics("""eval""" , _snake_case ) trainer.save_metrics("""eval""" , _snake_case ) # Write model card and (optionally) push to hub UpperCAmelCase = { """finetuned_from""": model_args.model_name_or_path, """tasks""": """masked-image-modeling""", """dataset""": data_args.dataset_name, """tags""": ["""masked-image-modeling"""], } if training_args.push_to_hub: trainer.push_to_hub(**_snake_case ) else: trainer.create_model_card(**_snake_case ) if __name__ == "__main__": main()
713
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) def _a ( _snake_case ): """simple docstring""" UpperCAmelCase = OrderedDict() for key, value in state_dict.items(): if key.startswith("""module.encoder""" ): UpperCAmelCase = key.replace("""module.encoder""" , """glpn.encoder""" ) if key.startswith("""module.decoder""" ): UpperCAmelCase = key.replace("""module.decoder""" , """decoder.stages""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 UpperCAmelCase = key[key.find("""patch_embed""" ) + len("""patch_embed""" )] UpperCAmelCase = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(_snake_case )-1}''' ) if "norm" in key: UpperCAmelCase = key.replace("""norm""" , """layer_norm""" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 UpperCAmelCase = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )] UpperCAmelCase = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(_snake_case )-1}''' ) if "layer_norm1" in key: UpperCAmelCase = key.replace("""layer_norm1""" , """layer_norm_1""" ) if "layer_norm2" in key: UpperCAmelCase = key.replace("""layer_norm2""" , """layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 UpperCAmelCase = key[key.find("""block""" ) + len("""block""" )] UpperCAmelCase = key.replace(F'''block{idx}''' , F'''block.{int(_snake_case )-1}''' ) if "attn.q" in key: UpperCAmelCase = key.replace("""attn.q""" , """attention.self.query""" ) if "attn.proj" in key: UpperCAmelCase = key.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in key: UpperCAmelCase = key.replace("""attn""" , """attention.self""" ) if 
"fc1" in key: UpperCAmelCase = key.replace("""fc1""" , """dense1""" ) if "fc2" in key: UpperCAmelCase = key.replace("""fc2""" , """dense2""" ) if "linear_pred" in key: UpperCAmelCase = key.replace("""linear_pred""" , """classifier""" ) if "linear_fuse" in key: UpperCAmelCase = key.replace("""linear_fuse.conv""" , """linear_fuse""" ) UpperCAmelCase = key.replace("""linear_fuse.bn""" , """batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 UpperCAmelCase = key[key.find("""linear_c""" ) + len("""linear_c""" )] UpperCAmelCase = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(_snake_case )-1}''' ) if "bot_conv" in key: UpperCAmelCase = key.replace("""bot_conv""" , """0.convolution""" ) if "skip_conv1" in key: UpperCAmelCase = key.replace("""skip_conv1""" , """1.convolution""" ) if "skip_conv2" in key: UpperCAmelCase = key.replace("""skip_conv2""" , """2.convolution""" ) if "fusion1" in key: UpperCAmelCase = key.replace("""fusion1""" , """1.fusion""" ) if "fusion2" in key: UpperCAmelCase = key.replace("""fusion2""" , """2.fusion""" ) if "fusion3" in key: UpperCAmelCase = key.replace("""fusion3""" , """3.fusion""" ) if "fusion" in key and "conv" in key: UpperCAmelCase = key.replace("""conv""" , """convolutional_layer""" ) if key.startswith("""module.last_layer_depth""" ): UpperCAmelCase = key.replace("""module.last_layer_depth""" , """head.head""" ) UpperCAmelCase = value return new_state_dict def _a ( _snake_case , _snake_case ): """simple docstring""" for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) UpperCAmelCase = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' ) UpperCAmelCase = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict UpperCAmelCase = kv_weight[ : config.hidden_sizes[i], : ] 
UpperCAmelCase = kv_bias[: config.hidden_sizes[i]] UpperCAmelCase = kv_weight[ config.hidden_sizes[i] :, : ] UpperCAmelCase = kv_bias[config.hidden_sizes[i] :] def _a ( ): """simple docstring""" UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ) return image @torch.no_grad() def _a ( _snake_case , _snake_case , _snake_case=False , _snake_case=None ): """simple docstring""" UpperCAmelCase = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) UpperCAmelCase = GLPNImageProcessor() # prepare image UpperCAmelCase = prepare_img() UpperCAmelCase = image_processor(images=_snake_case , return_tensors="""pt""" ).pixel_values logger.info("""Converting model...""" ) # load original state dict UpperCAmelCase = torch.load(_snake_case , map_location=torch.device("""cpu""" ) ) # rename keys UpperCAmelCase = rename_keys(_snake_case ) # key and value matrices need special treatment read_in_k_v(_snake_case , _snake_case ) # create HuggingFace model and load state dict UpperCAmelCase = GLPNForDepthEstimation(_snake_case ) model.load_state_dict(_snake_case ) model.eval() # forward pass UpperCAmelCase = model(_snake_case ) UpperCAmelCase = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: UpperCAmelCase = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: UpperCAmelCase = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(F'''Unknown model name: {model_name}''' ) UpperCAmelCase = torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , _snake_case , atol=1E-4 ) print("""Looks ok!""" ) # finally, push to hub if required if push_to_hub: 
logger.info("""Pushing model and image processor to the hub...""" ) model.push_to_hub( repo_path_or_name=Path(_snake_case , _snake_case ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=_snake_case , ) image_processor.push_to_hub( repo_path_or_name=Path(_snake_case , _snake_case ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=_snake_case , ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) parser.add_argument( """--model_name""", default="""glpn-kitti""", type=str, help="""Name of the model in case you're pushing to the hub.""", ) _UpperCamelCase = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
74
0
"""simple docstring""" def _a ( _snake_case , _snake_case ): """simple docstring""" if a < 0 or b < 0: raise ValueError("""the value of both inputs must be positive""" ) UpperCAmelCase = str(bin(_snake_case ) )[2:] # remove the leading "0b" UpperCAmelCase = str(bin(_snake_case ) )[2:] # remove the leading "0b" UpperCAmelCase = max(len(_snake_case ) , len(_snake_case ) ) return "0b" + "".join( str(int(char_a == """1""" and char_b == """1""" ) ) for char_a, char_b in zip(a_binary.zfill(_snake_case ) , b_binary.zfill(_snake_case ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
714
"""simple docstring""" def _a ( _snake_case ): # noqa: E741 """simple docstring""" UpperCAmelCase = len(_snake_case ) UpperCAmelCase = 0 UpperCAmelCase = [0] * n UpperCAmelCase = [False] * n UpperCAmelCase = [False] * n def dfs(_snake_case , _snake_case , _snake_case , _snake_case ): if parent == root: out_edge_count += 1 UpperCAmelCase = True UpperCAmelCase = at for to in l[at]: if to == parent: pass elif not visited[to]: UpperCAmelCase = dfs(_snake_case , _snake_case , _snake_case , _snake_case ) UpperCAmelCase = min(low[at] , low[to] ) # AP found via bridge if at < low[to]: UpperCAmelCase = True # AP found via cycle if at == low[to]: UpperCAmelCase = True else: UpperCAmelCase = min(low[at] , _snake_case ) return out_edge_count for i in range(_snake_case ): if not visited[i]: UpperCAmelCase = 0 UpperCAmelCase = dfs(_snake_case , _snake_case , -1 , _snake_case ) UpperCAmelCase = out_edge_count > 1 for x in range(len(_snake_case ) ): if is_art[x] is True: print(_snake_case ) # Adjacency list of graph _UpperCamelCase = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], } compute_ap(data)
74
0
"""simple docstring""" from argparse import ArgumentParser from datasets.commands.convert import ConvertCommand from datasets.commands.dummy_data import DummyDataCommand from datasets.commands.env import EnvironmentCommand from datasets.commands.run_beam import RunBeamCommand from datasets.commands.test import TestCommand from datasets.utils.logging import set_verbosity_info def _a ( _snake_case ): """simple docstring""" return {key.lstrip("""-""" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )} def _a ( ): """simple docstring""" UpperCAmelCase = ArgumentParser( """HuggingFace Datasets CLI tool""" , usage="""datasets-cli <command> [<args>]""" , allow_abbrev=_snake_case ) UpperCAmelCase = parser.add_subparsers(help="""datasets-cli command helpers""" ) set_verbosity_info() # Register commands ConvertCommand.register_subcommand(_snake_case ) EnvironmentCommand.register_subcommand(_snake_case ) TestCommand.register_subcommand(_snake_case ) RunBeamCommand.register_subcommand(_snake_case ) DummyDataCommand.register_subcommand(_snake_case ) # Parse args UpperCAmelCase , UpperCAmelCase = parser.parse_known_args() if not hasattr(_snake_case , """func""" ): parser.print_help() exit(1 ) UpperCAmelCase = parse_unknown_args(_snake_case ) # Run UpperCAmelCase = args.func(_snake_case , **_snake_case ) service.run() if __name__ == "__main__": main()
715
"""simple docstring""" _UpperCamelCase = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ _UpperCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}] _UpperCamelCase = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
74
0
"""simple docstring""" import unittest from dataclasses import dataclass import pytest from accelerate.commands.config.config_args import SageMakerConfig from accelerate.utils import ComputeEnvironment from accelerate.utils.launch import _convert_nargs_to_dict @dataclass class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = ComputeEnvironment.AMAZON_SAGEMAKER SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = '''ml.p3.2xlarge''' SCREAMING_SNAKE_CASE = '''accelerate_sagemaker_execution_role''' SCREAMING_SNAKE_CASE = '''hf-sm''' SCREAMING_SNAKE_CASE = '''us-east-1''' SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = '''accelerate-sagemaker-1''' SCREAMING_SNAKE_CASE = '''1.6''' SCREAMING_SNAKE_CASE = '''4.4''' SCREAMING_SNAKE_CASE = '''train.py''' SCREAMING_SNAKE_CASE = [ '''--model_name_or_path''', '''bert''', '''--do_train''', '''False''', '''--epochs''', '''3''', '''--learning_rate''', '''5e-5''', '''--max_steps''', '''50.5''', ] SCREAMING_SNAKE_CASE = [ '''--model_name_or_path''', '''bert''', '''--do_train''', '''--do_test''', '''False''', '''--do_predict''', '''--epochs''', '''3''', '''--learning_rate''', '''5e-5''', '''--max_steps''', '''50.5''', ] class lowerCamelCase__ ( unittest.TestCase ): def _UpperCamelCase ( self ): # If no defaults are changed, `to_kwargs` returns an empty dict. UpperCAmelCase = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args ) assert isinstance(converted_args["""model_name_or_path"""] ,A ) assert isinstance(converted_args["""do_train"""] ,A ) assert isinstance(converted_args["""epochs"""] ,A ) assert isinstance(converted_args["""learning_rate"""] ,A ) assert isinstance(converted_args["""max_steps"""] ,A ) with pytest.raises(A ): _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
716
"""simple docstring""" import argparse import struct import unittest class lowerCamelCase__ : def __init__( self ,A ): UpperCAmelCase = data # Initialize hash values UpperCAmelCase = [ 0x6A_09_E6_67, 0xBB_67_AE_85, 0x3C_6E_F3_72, 0xA5_4F_F5_3A, 0x51_0E_52_7F, 0x9B_05_68_8C, 0x1F_83_D9_AB, 0x5B_E0_CD_19, ] # Initialize round constants UpperCAmelCase = [ 0x42_8A_2F_98, 0x71_37_44_91, 0xB5_C0_FB_CF, 0xE9_B5_DB_A5, 0x39_56_C2_5B, 0x59_F1_11_F1, 0x92_3F_82_A4, 0xAB_1C_5E_D5, 0xD8_07_AA_98, 0x12_83_5B_01, 0x24_31_85_BE, 0x55_0C_7D_C3, 0x72_BE_5D_74, 0x80_DE_B1_FE, 0x9B_DC_06_A7, 0xC1_9B_F1_74, 0xE4_9B_69_C1, 0xEF_BE_47_86, 0x0F_C1_9D_C6, 0x24_0C_A1_CC, 0x2D_E9_2C_6F, 0x4A_74_84_AA, 0x5C_B0_A9_DC, 0x76_F9_88_DA, 0x98_3E_51_52, 0xA8_31_C6_6D, 0xB0_03_27_C8, 0xBF_59_7F_C7, 0xC6_E0_0B_F3, 0xD5_A7_91_47, 0x06_CA_63_51, 0x14_29_29_67, 0x27_B7_0A_85, 0x2E_1B_21_38, 0x4D_2C_6D_FC, 0x53_38_0D_13, 0x65_0A_73_54, 0x76_6A_0A_BB, 0x81_C2_C9_2E, 0x92_72_2C_85, 0xA2_BF_E8_A1, 0xA8_1A_66_4B, 0xC2_4B_8B_70, 0xC7_6C_51_A3, 0xD1_92_E8_19, 0xD6_99_06_24, 0xF4_0E_35_85, 0x10_6A_A0_70, 0x19_A4_C1_16, 0x1E_37_6C_08, 0x27_48_77_4C, 0x34_B0_BC_B5, 0x39_1C_0C_B3, 0x4E_D8_AA_4A, 0x5B_9C_CA_4F, 0x68_2E_6F_F3, 0x74_8F_82_EE, 0x78_A5_63_6F, 0x84_C8_78_14, 0x8C_C7_02_08, 0x90_BE_FF_FA, 0xA4_50_6C_EB, 0xBE_F9_A3_F7, 0xC6_71_78_F2, ] UpperCAmelCase = self.preprocessing(self.data ) self.final_hash() @staticmethod def _UpperCamelCase ( A ): UpperCAmelCase = b"""\x80""" + (b"""\x00""" * (63 - (len(A ) + 8) % 64)) UpperCAmelCase = struct.pack(""">Q""" ,(len(A ) * 8) ) return data + padding + big_endian_integer def _UpperCamelCase ( self ): # Convert into blocks of 64 bytes UpperCAmelCase = [ self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers UpperCAmelCase = list(struct.unpack(""">16L""" ,A ) ) # add 48 0-ed integers words += [0] * 48 UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , 
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.hashes for index in range(0 ,64 ): if index > 15: # modify the zero-ed indexes at the end of the array UpperCAmelCase = ( self.ror(words[index - 15] ,7 ) ^ self.ror(words[index - 15] ,18 ) ^ (words[index - 15] >> 3) ) UpperCAmelCase = ( self.ror(words[index - 2] ,17 ) ^ self.ror(words[index - 2] ,19 ) ^ (words[index - 2] >> 10) ) UpperCAmelCase = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_00_00_00_00 # Compression UpperCAmelCase = self.ror(A ,6 ) ^ self.ror(A ,11 ) ^ self.ror(A ,25 ) UpperCAmelCase = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g) UpperCAmelCase = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_00_00_00_00 UpperCAmelCase = self.ror(A ,2 ) ^ self.ror(A ,13 ) ^ self.ror(A ,22 ) UpperCAmelCase = (a & b) ^ (a & c) ^ (b & c) UpperCAmelCase = (sa + maj) % 0x1_00_00_00_00 UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = ( g, f, e, ((d + tempa) % 0x1_00_00_00_00), c, b, a, ((tempa + tempa) % 0x1_00_00_00_00), ) UpperCAmelCase = [a, b, c, d, e, f, g, h] # Modify final values UpperCAmelCase = [ ((element + mutated_hash_values[index]) % 0x1_00_00_00_00) for index, element in enumerate(self.hashes ) ] UpperCAmelCase = """""".join([hex(A )[2:].zfill(8 ) for value in self.hashes] ) def _UpperCamelCase ( self ,A ,A ): return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations) class lowerCamelCase__ ( unittest.TestCase ): def _UpperCamelCase ( self ): import hashlib UpperCAmelCase = bytes("""Test String""" ,"""utf-8""" ) self.assertEqual(SHAaaa(A ).hash ,hashlib.shaaaa(A ).hexdigest() ) def _a ( ): """simple docstring""" import doctest doctest.testmod() UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! 
Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument( """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) UpperCAmelCase = parser.parse_args() UpperCAmelCase = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: UpperCAmelCase = f.read() else: UpperCAmelCase = bytes(_snake_case , """utf-8""" ) print(SHAaaa(_snake_case ).hash ) if __name__ == "__main__": main()
74
0
"""simple docstring""" import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class lowerCamelCase__ ( snake_case ): def __init__( self ,A ,A=13 ,A=7 ,A=True ,A=True ,A=False ,A=True ,A=99 ,A=32 ,A=5 ,A=4 ,A=37 ,A="gelu" ,A=0.1 ,A=0.1 ,A=512 ,A=16 ,A=2 ,A=0.02 ,A=3 ,A=4 ,A=None ,): UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = seq_length UpperCAmelCase = is_training UpperCAmelCase = use_input_mask UpperCAmelCase = use_token_type_ids UpperCAmelCase = use_labels UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = type_vocab_size UpperCAmelCase = type_sequence_label_size UpperCAmelCase = initializer_range UpperCAmelCase = num_labels UpperCAmelCase = num_choices UpperCAmelCase = scope def _UpperCamelCase ( self ): UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase = None if self.use_input_mask: UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = None if self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size] 
,self.type_sequence_label_size ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) UpperCAmelCase = ids_tensor([self.batch_size] ,self.num_choices ) UpperCAmelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCamelCase ( self ): return DistilBertConfig( vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ): UpperCAmelCase = DistilBertModel(config=A ) model.to(A ) model.eval() UpperCAmelCase = model(A ,A ) UpperCAmelCase = model(A ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ): UpperCAmelCase = DistilBertForMaskedLM(config=A ) model.to(A ) model.eval() UpperCAmelCase = model(A ,attention_mask=A ,labels=A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ): UpperCAmelCase = DistilBertForQuestionAnswering(config=A ) model.to(A ) model.eval() UpperCAmelCase = model( A ,attention_mask=A ,start_positions=A ,end_positions=A ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ): UpperCAmelCase = self.num_labels UpperCAmelCase = DistilBertForSequenceClassification(A ) model.to(A ) model.eval() UpperCAmelCase = model(A ,attention_mask=A ,labels=A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _UpperCamelCase ( self 
,A ,A ,A ,A ,A ,A ): UpperCAmelCase = self.num_labels UpperCAmelCase = DistilBertForTokenClassification(config=A ) model.to(A ) model.eval() UpperCAmelCase = model(A ,attention_mask=A ,labels=A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ): UpperCAmelCase = self.num_choices UpperCAmelCase = DistilBertForMultipleChoice(config=A ) model.to(A ) model.eval() UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() UpperCAmelCase = model( A ,attention_mask=A ,labels=A ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _UpperCamelCase ( self ): UpperCAmelCase = self.prepare_config_and_inputs() ((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) = config_and_inputs UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) SCREAMING_SNAKE_CASE = ( { '''feature-extraction''': DistilBertModel, '''fill-mask''': DistilBertForMaskedLM, '''question-answering''': DistilBertForQuestionAnswering, '''text-classification''': DistilBertForSequenceClassification, '''token-classification''': DistilBertForTokenClassification, '''zero-shot''': DistilBertForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = True def _UpperCamelCase ( self ): UpperCAmelCase = DistilBertModelTester(self ) 
UpperCAmelCase = ConfigTester(self ,config_class=A ,dim=37 ) def _UpperCamelCase ( self ): self.config_tester.run_common_tests() def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*A ) @slow def _UpperCamelCase ( self ): for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase = DistilBertModel.from_pretrained(A ) self.assertIsNotNone(A ) @slow @require_torch_gpu def _UpperCamelCase ( self ): UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == DistilBertForMultipleChoice: return UpperCAmelCase = True UpperCAmelCase = model_class(config=A ) UpperCAmelCase = self._prepare_for_class(A ,A ) UpperCAmelCase = torch.jit.trace( A ,(inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(A ,os.path.join(A ,"""traced_model.pt""" ) ) UpperCAmelCase = torch.jit.load(os.path.join(A ,"""traced_model.pt""" ) ,map_location=A ) loaded(inputs_dict["""input_ids"""].to(A ) ,inputs_dict["""attention_mask"""].to(A ) ) @require_torch class lowerCamelCase__ ( unittest.TestCase ): @slow def _UpperCamelCase ( self ): UpperCAmelCase = DistilBertModel.from_pretrained("""distilbert-base-uncased""" ) UpperCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) UpperCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase = model(A ,attention_mask=A )[0] UpperCAmelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape ,A ) UpperCAmelCase = torch.tensor( [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,A ,atol=1e-4 ) )
717
"""simple docstring""" def _a ( _snake_case = 10 , _snake_case = 22 ): """simple docstring""" UpperCAmelCase = range(1 , _snake_case ) UpperCAmelCase = range(1 , _snake_case ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(F"""{solution(10, 22) = }""")
74
0
"""simple docstring""" from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING _UpperCamelCase = logging.get_logger(__name__) @add_end_docstrings(snake_case ) class lowerCamelCase__ ( snake_case ): def __init__( self ,*A ,**A ): super().__init__(*A ,**A ) requires_backends(self ,"""vision""" ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def _UpperCamelCase ( self ,A=None ): UpperCAmelCase = {} if top_k is not None: UpperCAmelCase = top_k return {}, {}, postprocess_params def __call__( self ,A ,**A ): return super().__call__(A ,**A ) def _UpperCamelCase ( self ,A ): UpperCAmelCase = load_image(A ) UpperCAmelCase = self.image_processor(images=A ,return_tensors=self.framework ) return model_inputs def _UpperCamelCase ( self ,A ): UpperCAmelCase = self.model(**A ) return model_outputs def _UpperCamelCase ( self ,A ,A=5 ): if top_k > self.model.config.num_labels: UpperCAmelCase = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase = model_outputs.logits.softmax(-1 )[0] UpperCAmelCase , UpperCAmelCase = probs.topk(A ) elif self.framework == "tf": UpperCAmelCase = stable_softmax(model_outputs.logits ,axis=-1 )[0] UpperCAmelCase = tf.math.top_k(A ,k=A ) UpperCAmelCase , UpperCAmelCase = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) UpperCAmelCase = scores.tolist() UpperCAmelCase = 
ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(A ,A )]
718
"""simple docstring""" from __future__ import annotations def _a ( _snake_case ): """simple docstring""" return len(set(_snake_case ) ) == len(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
74
0
"""simple docstring""" def _a ( _snake_case , _snake_case , _snake_case = 0 , _snake_case = 0 ): """simple docstring""" UpperCAmelCase = right or len(_snake_case ) - 1 if left > right: return -1 elif list_data[left] == key: return left elif list_data[right] == key: return right else: return search(_snake_case , _snake_case , left + 1 , right - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
719
"""simple docstring""" import math def _a ( _snake_case ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_snake_case ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _a ( _snake_case = 0.1 ): """simple docstring""" UpperCAmelCase = 3 UpperCAmelCase = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ): primes += is_prime(_snake_case ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
74
0
"""simple docstring""" import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def _a ( _snake_case ): """simple docstring""" assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def _a ( ): """simple docstring""" assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def _a ( ): """simple docstring""" UpperCAmelCase = """mock-s3-bucket""" UpperCAmelCase = F'''s3://{mock_bucket}''' UpperCAmelCase = extract_path_from_uri(_snake_case ) assert dataset_path.startswith("""s3://""" ) is False UpperCAmelCase = """./local/path""" UpperCAmelCase = extract_path_from_uri(_snake_case ) assert dataset_path == new_dataset_path def _a ( _snake_case ): """simple docstring""" UpperCAmelCase = is_remote_filesystem(_snake_case ) assert is_remote is True UpperCAmelCase = fsspec.filesystem("""file""" ) UpperCAmelCase = is_remote_filesystem(_snake_case ) assert is_remote is False @pytest.mark.parametrize("""compression_fs_class""" , _snake_case ) def _a ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" UpperCAmelCase = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bza_file, """lz4""": lza_file} UpperCAmelCase = input_paths[compression_fs_class.protocol] if input_path is None: UpperCAmelCase = F'''for \'{compression_fs_class.protocol}\' compression protocol, ''' if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(_snake_case ) UpperCAmelCase = fsspec.filesystem(compression_fs_class.protocol , fo=_snake_case ) assert isinstance(_snake_case , _snake_case ) 
UpperCAmelCase = os.path.basename(_snake_case ) UpperCAmelCase = expected_filename[: expected_filename.rindex(""".""" )] assert fs.glob("""*""" ) == [expected_filename] with fs.open(_snake_case , """r""" , encoding="""utf-8""" ) as f, open(_snake_case , encoding="""utf-8""" ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("""protocol""" , ["""zip""", """gzip"""] ) def _a ( _snake_case , _snake_case , _snake_case ): """simple docstring""" UpperCAmelCase = {"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path} UpperCAmelCase = compressed_file_paths[protocol] UpperCAmelCase = """dataset.jsonl""" UpperCAmelCase = F'''{protocol}://{member_file_path}::{compressed_file_path}''' UpperCAmelCase , *UpperCAmelCase = fsspec.get_fs_token_paths(_snake_case ) assert fs.isfile(_snake_case ) assert not fs.isfile("""non_existing_""" + member_file_path ) @pytest.mark.integration def _a ( _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" UpperCAmelCase = hf_api.dataset_info(_snake_case , token=_snake_case ) UpperCAmelCase = HfFileSystem(repo_info=_snake_case , token=_snake_case ) assert sorted(hffs.glob("""*""" ) ) == [".gitattributes", "data"] assert hffs.isdir("""data""" ) assert hffs.isfile(""".gitattributes""" ) and hffs.isfile("""data/text_data.txt""" ) with open(_snake_case ) as f: assert hffs.open("""data/text_data.txt""" , """r""" ).read() == f.read() def _a ( ): """simple docstring""" UpperCAmelCase = """bz2""" # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(_snake_case , _snake_case , clobber=_snake_case ) with pytest.warns(_snake_case ) as warning_info: importlib.reload(datasets.filesystems ) assert len(_snake_case ) == 1 assert ( str(warning_info[0].message ) == F'''A filesystem protocol was already set for {protocol} and will be overwritten.''' )
720
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = ['''image_processor''', '''tokenizer'''] SCREAMING_SNAKE_CASE = '''CLIPImageProcessor''' SCREAMING_SNAKE_CASE = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self ,A=None ,A=None ,**A ): UpperCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" ,A ,) UpperCAmelCase = kwargs.pop("""feature_extractor""" ) UpperCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(A ,A ) def __call__( self ,A=None ,A=None ,A=None ,**A ): if text is None and images is None: raise ValueError("""You have to specify either text or images. 
Both cannot be none.""" ) if text is not None: UpperCAmelCase = self.tokenizer(A ,return_tensors=A ,**A ) if images is not None: UpperCAmelCase = self.image_processor(A ,return_tensors=A ,**A ) if text is not None and images is not None: UpperCAmelCase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**A ) ,tensor_type=A ) def _UpperCamelCase ( self ,*A ,**A ): return self.tokenizer.batch_decode(*A ,**A ) def _UpperCamelCase ( self ,*A ,**A ): return self.tokenizer.decode(*A ,**A ) @property def _UpperCamelCase ( self ): UpperCAmelCase = self.tokenizer.model_input_names UpperCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _UpperCamelCase ( self ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,A ,) return self.image_processor_class @property def _UpperCamelCase ( self ): warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,A ,) return self.image_processor
74
0
"""simple docstring""" import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def _a ( _snake_case , _snake_case ): """simple docstring""" UpperCAmelCase = checkpoint UpperCAmelCase = {} UpperCAmelCase = vae_state_dict["""encoder.conv_in.weight"""] UpperCAmelCase = vae_state_dict["""encoder.conv_in.bias"""] UpperCAmelCase = vae_state_dict["""encoder.conv_out.weight"""] UpperCAmelCase = vae_state_dict["""encoder.conv_out.bias"""] UpperCAmelCase = vae_state_dict["""encoder.norm_out.weight"""] UpperCAmelCase = vae_state_dict["""encoder.norm_out.bias"""] UpperCAmelCase = vae_state_dict["""decoder.conv_in.weight"""] UpperCAmelCase = vae_state_dict["""decoder.conv_in.bias"""] UpperCAmelCase = vae_state_dict["""decoder.conv_out.weight"""] UpperCAmelCase = vae_state_dict["""decoder.conv_out.bias"""] UpperCAmelCase = vae_state_dict["""decoder.norm_out.weight"""] UpperCAmelCase = vae_state_dict["""decoder.norm_out.bias"""] UpperCAmelCase = vae_state_dict["""quant_conv.weight"""] UpperCAmelCase = vae_state_dict["""quant_conv.bias"""] UpperCAmelCase = vae_state_dict["""post_quant_conv.weight"""] UpperCAmelCase = vae_state_dict["""post_quant_conv.bias"""] # Retrieves the keys for the encoder down blocks only UpperCAmelCase = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} ) UpperCAmelCase = { layer_id: [key for key in vae_state_dict if F'''down.{layer_id}''' in key] for layer_id in range(_snake_case ) } # Retrieves the keys for the decoder up blocks only UpperCAmelCase = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} ) UpperCAmelCase = { layer_id: [key for key in vae_state_dict if F'''up.{layer_id}''' in 
key] for layer_id in range(_snake_case ) } for i in range(_snake_case ): UpperCAmelCase = [key for key in down_blocks[i] if F'''down.{i}''' in key and F'''down.{i}.downsample''' not in key] if F'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict: UpperCAmelCase = vae_state_dict.pop( F'''encoder.down.{i}.downsample.conv.weight''' ) UpperCAmelCase = vae_state_dict.pop( F'''encoder.down.{i}.downsample.conv.bias''' ) UpperCAmelCase = renew_vae_resnet_paths(_snake_case ) UpperCAmelCase = {"""old""": F'''down.{i}.block''', """new""": F'''down_blocks.{i}.resnets'''} assign_to_checkpoint(_snake_case , _snake_case , _snake_case , additional_replacements=[meta_path] , config=_snake_case ) UpperCAmelCase = [key for key in vae_state_dict if """encoder.mid.block""" in key] UpperCAmelCase = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCAmelCase = [key for key in mid_resnets if F'''encoder.mid.block_{i}''' in key] UpperCAmelCase = renew_vae_resnet_paths(_snake_case ) UpperCAmelCase = {"""old""": F'''mid.block_{i}''', """new""": F'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(_snake_case , _snake_case , _snake_case , additional_replacements=[meta_path] , config=_snake_case ) UpperCAmelCase = [key for key in vae_state_dict if """encoder.mid.attn""" in key] UpperCAmelCase = renew_vae_attention_paths(_snake_case ) UpperCAmelCase = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""} assign_to_checkpoint(_snake_case , _snake_case , _snake_case , additional_replacements=[meta_path] , config=_snake_case ) conv_attn_to_linear(_snake_case ) for i in range(_snake_case ): UpperCAmelCase = num_up_blocks - 1 - i UpperCAmelCase = [ key for key in up_blocks[block_id] if F'''up.{block_id}''' in key and F'''up.{block_id}.upsample''' not in key ] if F'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict: UpperCAmelCase = vae_state_dict[ F'''decoder.up.{block_id}.upsample.conv.weight''' ] UpperCAmelCase = vae_state_dict[ 
F'''decoder.up.{block_id}.upsample.conv.bias''' ] UpperCAmelCase = renew_vae_resnet_paths(_snake_case ) UpperCAmelCase = {"""old""": F'''up.{block_id}.block''', """new""": F'''up_blocks.{i}.resnets'''} assign_to_checkpoint(_snake_case , _snake_case , _snake_case , additional_replacements=[meta_path] , config=_snake_case ) UpperCAmelCase = [key for key in vae_state_dict if """decoder.mid.block""" in key] UpperCAmelCase = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCAmelCase = [key for key in mid_resnets if F'''decoder.mid.block_{i}''' in key] UpperCAmelCase = renew_vae_resnet_paths(_snake_case ) UpperCAmelCase = {"""old""": F'''mid.block_{i}''', """new""": F'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(_snake_case , _snake_case , _snake_case , additional_replacements=[meta_path] , config=_snake_case ) UpperCAmelCase = [key for key in vae_state_dict if """decoder.mid.attn""" in key] UpperCAmelCase = renew_vae_attention_paths(_snake_case ) UpperCAmelCase = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""} assign_to_checkpoint(_snake_case , _snake_case , _snake_case , additional_replacements=[meta_path] , config=_snake_case ) conv_attn_to_linear(_snake_case ) return new_checkpoint def _a ( _snake_case , _snake_case , ): """simple docstring""" UpperCAmelCase = requests.get( """ https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" ) UpperCAmelCase = io.BytesIO(r.content ) UpperCAmelCase = OmegaConf.load(_snake_case ) UpperCAmelCase = 512 UpperCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu""" if checkpoint_path.endswith("""safetensors""" ): from safetensors import safe_open UpperCAmelCase = {} with safe_open(_snake_case , framework="""pt""" , device="""cpu""" ) as f: for key in f.keys(): UpperCAmelCase = f.get_tensor(_snake_case ) else: UpperCAmelCase = torch.load(_snake_case , map_location=_snake_case )["""state_dict"""] # Convert the VAE model. 
UpperCAmelCase = create_vae_diffusers_config(_snake_case , image_size=_snake_case ) UpperCAmelCase = custom_convert_ldm_vae_checkpoint(_snake_case , _snake_case ) UpperCAmelCase = AutoencoderKL(**_snake_case ) vae.load_state_dict(_snake_case ) vae.save_pretrained(_snake_case ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""") parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""") _UpperCamelCase = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
721
"""simple docstring""" from __future__ import annotations from collections.abc import Generator import requests from bsa import BeautifulSoup _UpperCamelCase = """https://www.indeed.co.in/jobs?q=mobile+app+development&l=""" def _a ( _snake_case = "mumbai" ): """simple docstring""" UpperCAmelCase = BeautifulSoup(requests.get(url + location ).content , """html.parser""" ) # This attribute finds out all the specifics listed in a job for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ): UpperCAmelCase = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip() UpperCAmelCase = job.find("""span""" , {"""class""": """company"""} ).text.strip() yield job_title, company_name if __name__ == "__main__": for i, job in enumerate(fetch_jobs("""Bangalore"""), 1): print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
74
0
import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. _UpperCamelCase = abspath(join(dirname(dirname(__file__)), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def _a ( _snake_case ): """simple docstring""" from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(_snake_case ) def _a ( _snake_case ): """simple docstring""" from diffusers.utils.testing_utils import pytest_terminal_summary_main UpperCAmelCase = terminalreporter.config.getoption("""--make-reports""" ) if make_reports: pytest_terminal_summary_main(_snake_case , id=_snake_case )
700
"""simple docstring""" import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class lowerCamelCase__ ( unittest.TestCase ): def _UpperCamelCase ( self ): UpperCAmelCase = ["""a""", """b""", """c"""] # Defaults to last layer if both are None UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(A ,A ,A ) self.assertEqual(A ,["""c"""] ) self.assertEqual(A ,[2] ) # Out indices set to match out features UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(["""a""", """c"""] ,A ,A ) self.assertEqual(A ,["""a""", """c"""] ) self.assertEqual(A ,[0, 2] ) # Out features set to match out indices UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(A ,[0, 2] ,A ) self.assertEqual(A ,["""a""", """c"""] ) self.assertEqual(A ,[0, 2] ) # Out features selected from negative indices UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(A ,[-3, -1] ,A ) self.assertEqual(A ,["""a""", """c"""] ) self.assertEqual(A ,[-3, -1] ) def _UpperCamelCase ( self ): # Stage names must be set with self.assertRaises(A ): verify_out_features_out_indices(["""a""", """b"""] ,(0, 1) ,A ) # Out features must be a list with self.assertRaises(A ): verify_out_features_out_indices(("""a""", """b""") ,(0, 1) ,["""a""", """b"""] ) # Out features must be a subset of stage names with self.assertRaises(A ): verify_out_features_out_indices(["""a""", """b"""] ,(0, 1) ,["""a"""] ) # Out indices must be a list or tuple with self.assertRaises(A ): verify_out_features_out_indices(A ,0 ,["""a""", """b"""] ) # Out indices must be a subset of stage names with self.assertRaises(A ): verify_out_features_out_indices(A ,(0, 1) ,["""a"""] ) # Out features and out indices must be the same length with self.assertRaises(A ): verify_out_features_out_indices(["""a""", """b"""] ,(0,) ,["""a""", """b""", """c"""] ) # Out features should 
match out indices with self.assertRaises(A ): verify_out_features_out_indices(["""a""", """b"""] ,(0, 2) ,["""a""", """b""", """c"""] ) # Out features and out indices should be in order with self.assertRaises(A ): verify_out_features_out_indices(["""b""", """a"""] ,(0, 1) ,["""a""", """b"""] ) # Check passes with valid inputs verify_out_features_out_indices(["""a""", """b""", """d"""] ,(0, 1, -1) ,["""a""", """b""", """c""", """d"""] ) def _UpperCamelCase ( self ): UpperCAmelCase = BackboneMixin() UpperCAmelCase = ["""a""", """b""", """c"""] UpperCAmelCase = ["""a""", """c"""] UpperCAmelCase = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features ,["""a""", """c"""] ) self.assertEqual(backbone.out_indices ,[0, 2] ) # Check out features and indices are updated correctly UpperCAmelCase = ["""a""", """b"""] self.assertEqual(backbone.out_features ,["""a""", """b"""] ) self.assertEqual(backbone.out_indices ,[0, 1] ) UpperCAmelCase = [-3, -1] self.assertEqual(backbone.out_features ,["""a""", """c"""] ) self.assertEqual(backbone.out_indices ,[-3, -1] )
74
0
"""simple docstring""" def _a ( _snake_case ): """simple docstring""" return "".join([hex(_snake_case )[2:].zfill(2 ).upper() for byte in list(_snake_case )] ) def _a ( _snake_case ): """simple docstring""" if (len(_snake_case ) % 2) != 0: raise ValueError( """Base16 encoded data is invalid: Data does not have an even number of hex digits.""" ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(_snake_case ) <= set("""0123456789ABCDEF""" ): raise ValueError( """Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.""" ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(_snake_case ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
701
"""simple docstring""" from __future__ import annotations from typing import Any class lowerCamelCase__ : def __init__( self ,A = 6 ): UpperCAmelCase = None UpperCAmelCase = None self.create_linked_list(A ) def _UpperCamelCase ( self ,A ): UpperCAmelCase = Node() UpperCAmelCase = current_node UpperCAmelCase = current_node UpperCAmelCase = current_node for _ in range(1 ,A ): UpperCAmelCase = Node() UpperCAmelCase = current_node UpperCAmelCase = previous_node UpperCAmelCase = current_node UpperCAmelCase = self.front UpperCAmelCase = previous_node def _UpperCamelCase ( self ): return ( self.front == self.rear and self.front is not None and self.front.data is None ) def _UpperCamelCase ( self ): self.check_can_perform_operation() return self.front.data if self.front else None def _UpperCamelCase ( self ,A ): if self.rear is None: return self.check_is_full() if not self.is_empty(): UpperCAmelCase = self.rear.next if self.rear: UpperCAmelCase = data def _UpperCamelCase ( self ): self.check_can_perform_operation() if self.rear is None or self.front is None: return None if self.front == self.rear: UpperCAmelCase = self.front.data UpperCAmelCase = None return data UpperCAmelCase = self.front UpperCAmelCase = old_front.next UpperCAmelCase = old_front.data UpperCAmelCase = None return data def _UpperCamelCase ( self ): if self.is_empty(): raise Exception("""Empty Queue""" ) def _UpperCamelCase ( self ): if self.rear and self.rear.next == self.front: raise Exception("""Full Queue""" ) class lowerCamelCase__ : def __init__( self ): UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = None if __name__ == "__main__": import doctest doctest.testmod()
74
0
"""simple docstring""" import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate _UpperCamelCase = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow("""""", """|""", """|"""), datarow=DataRow("""""", """|""", """|"""), padding=1, with_header_hide=None, ) _UpperCamelCase = [] _UpperCamelCase = [] _UpperCamelCase = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}} _UpperCamelCase = [ { """type""": """header""", """text""": { """type""": """plain_text""", """text""": F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""", """emoji""": True, }, } ] _UpperCamelCase = 0 for log in Path().glob("""*.log"""): _UpperCamelCase = 0 with open(log, """r""") as f: for line in f: _UpperCamelCase = json.loads(line) if line.get("""nodeid""", """""") != "": _UpperCamelCase = line["""nodeid"""] if line.get("""duration""", None) is not None: _UpperCamelCase = F"""{line["duration"]:.4f}""" if line.get("""outcome""", """""") == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split("""_""")[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) _UpperCamelCase = [] log.unlink() _UpperCamelCase = """""" _UpperCamelCase = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += F"*{name[1:]}: {num_failed} failed test*\n" else: message += F"*{name[1:]}: {num_failed} failed tests*\n" _UpperCamelCase = [] _UpperCamelCase = {} for test in failed_tests: _UpperCamelCase = test[0].split("""::""") _UpperCamelCase = data[0].split("""/""")[-1] if data[0] not in filesafailed: _UpperCamelCase = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) _UpperCamelCase = [test[0] for test in failed_table] _UpperCamelCase = list(set(files)) 
# Count number of instances in failed_tests _UpperCamelCase = [] for file in individual_files: table.append([file, len(filesafailed[file])]) _UpperCamelCase = tabulate( table, headers=["""Test Location""", """Num Failed"""], tablefmt=hf_table_format, stralign="""right""", ) message += F"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3000: _UpperCamelCase = """Too many failed tests, please see the full report in the Action results.""" _UpperCamelCase = len(err) + 10 _UpperCamelCase = message[: 3000 - offset] + F"""\n...\n```\n{err}""" print(F"""### {message}""") else: _UpperCamelCase = """No failed tests! 🤗""" print(F"""## {message}""") payload.append(no_error_payload) if os.environ.get("""TEST_TYPE""", """""") != "": from slack_sdk import WebClient _UpperCamelCase = WebClient(token=os.environ["""SLACK_API_TOKEN"""]) if message != "No failed tests! 🤗": _UpperCamelCase = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": message, }, } payload.append(md_report) _UpperCamelCase = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": """*For more details:*""", }, """accessory""": { """type""": """button""", """text""": { """type""": """plain_text""", """text""": """Check Action results""", """emoji""": True, }, """url""": F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""", }, } payload.append(action_button) _UpperCamelCase = { """type""": """context""", """elements""": [ { """type""": """plain_text""", """text""": F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""", } ], } payload.append(date_report) _UpperCamelCase = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload) _UpperCamelCase = response.data["""ts"""] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name _UpperCamelCase 
= """""" for i, row in enumerate(test_failures): if row[0] != test_class: _UpperCamelCase = row[0] else: _UpperCamelCase = """""" _UpperCamelCase = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""", }, } client.chat_postMessage( channel="""#accelerate-ci-daily""", thread_ts=ts, blocks=[payload], )
702
"""simple docstring""" import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() _UpperCamelCase = logging.get_logger("""transformers.models.speecht5""") def _a ( _snake_case , _snake_case , _snake_case ): """simple docstring""" hf_model.apply_weight_norm() UpperCAmelCase = checkpoint["""input_conv.weight_g"""] UpperCAmelCase = checkpoint["""input_conv.weight_v"""] UpperCAmelCase = checkpoint["""input_conv.bias"""] for i in range(len(config.upsample_rates ) ): UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.weight_g'''] UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.weight_v'''] UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.bias'''] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g'''] UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v'''] UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias'''] UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g'''] UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v'''] UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias'''] UpperCAmelCase = checkpoint["""output_conv.1.weight_g"""] UpperCAmelCase = checkpoint["""output_conv.1.weight_v"""] UpperCAmelCase = checkpoint["""output_conv.1.bias"""] hf_model.remove_weight_norm() @torch.no_grad() def _a ( _snake_case , _snake_case , _snake_case , _snake_case=None , _snake_case=None , ): """simple docstring""" if config_path is not None: UpperCAmelCase = SpeechTaHifiGanConfig.from_pretrained(_snake_case ) else: UpperCAmelCase = SpeechTaHifiGanConfig() UpperCAmelCase = SpeechTaHifiGan(_snake_case ) UpperCAmelCase = torch.load(_snake_case ) load_weights(orig_checkpoint["""model"""]["""generator"""] , _snake_case , _snake_case ) UpperCAmelCase = np.load(_snake_case ) UpperCAmelCase = 
stats[0].reshape(-1 ) UpperCAmelCase = stats[1].reshape(-1 ) UpperCAmelCase = torch.from_numpy(_snake_case ).float() UpperCAmelCase = torch.from_numpy(_snake_case ).float() model.save_pretrained(_snake_case ) if repo_id: print("""Pushing to the hub...""" ) model.push_to_hub(_snake_case ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""") parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) _UpperCamelCase = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
74
0
"""Convert BLIP checkpoints from the original Salesforce repository to the
HuggingFace Transformers format (captioning, VQA and ITM heads)."""

import argparse
import re

import requests
import torch

# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode

from transformers import (
    BertTokenizer,
    BlipConfig,
    BlipForConditionalGeneration,
    BlipForImageTextRetrieval,
    BlipForQuestionAnswering,
)


def load_demo_image(image_size, device):
    """Download the BLIP demo image and preprocess it into a (1, 3, image_size, image_size) tensor."""
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            # CLIP-style normalization constants used by BLIP's visual encoder
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image


def rename_key(key):
    """Map a key from the original BLIP state dict to the corresponding HF Transformers key."""
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key


@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """Copy the original BLIP weights into the HF BLIP structure for the captioning,
    VQA and ITM models, sanity-check their outputs, and optionally save them.

    Args:
        pytorch_dump_folder_path: where to save the converted models (None to skip saving).
        config_path: optional path to an HF config.json; a default BlipConfig is used otherwise.
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    # Rename every key of the original state dict to the HF naming scheme.
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    # Verify conditional and unconditional captioning against known token ids.
    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    # Check both the ITM head logits and the raw image-text similarity score.
    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    # Call with exactly the two parameters the function declares; the checkpoint
    # URLs are hard-coded inside convert_blip_checkpoint.
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
703
"""`datasets` package entry point: version sanity checks, public re-exports,
and backward-compatibility aliases for deprecated module attributes."""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

__version__ = "2.13.1"

import platform

import pyarrow
from packaging import version


# Fail fast with an explicit message if the runtime does not meet the minimum
# Python / pyarrow requirements of this release.
if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

# The version-check helpers are not part of the public API.
del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

# Re-attach public objects on the deprecated module paths so that legacy
# imports such as `datasets.arrow_dataset.concatenate_datasets` keep working.
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
74
0
"""Convert GLPN checkpoints from the original repository
(https://github.com/vinvino02/GLPDepth) to HuggingFace Transformers format."""

import argparse
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict):
    """Return a new OrderedDict with original GLPN keys mapped to HF Transformers names."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict


def read_in_k_v(state_dict, config):
    """Split each fused key/value ("kv") projection into separate key and value tensors, in place."""
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]


def prepare_img():
    """Fetch the standard COCO cats image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Copy/paste/tweak the original GLPN weights into the HF GLPN structure.

    Args:
        checkpoint_path: path to the original .pth checkpoint.
        pytorch_dump_folder_path: output folder for the converted model.
        push_to_hub: whether to push model + image processor to the Hub.
        model_name: model name ("nyu"/"kitti" variants), used for verification and Hub repo naming.
    """
    # GLPN-base size configuration
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output against reference slices from the original implementation
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
704
"""simple docstring""" def _a ( _snake_case ): """simple docstring""" if not isinstance(_snake_case , _snake_case ): raise ValueError("""Input must be an integer""" ) if input_num <= 0: raise ValueError("""Input must be positive""" ) return sum( divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
74
0
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests of the Kandinsky 2.2 img2img pipeline, built from tiny dummy components."""

    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """A tiny UNet wired for image-conditioned cross attention."""
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against the published Kandinsky 2.2 checkpoints."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
705
"""Pre-train a Transformers model on masked image modeling (SimMIM-style)."""

import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    AutoModelForMaskedImageModeling,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Collect local data folders (if any) into the mapping expected by load_dataset.
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )


class MaskGenerator:
    """Generate random boolean patch masks for SimMIM-style pretraining.

    Produces a flat tensor of 0/1 values at model-patch resolution where 1 means
    "masked"; masking is decided at mask-patch granularity and then upsampled.
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        # grid of mask patches per side, and upsampling factor to model patches
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        # upsample mask-patch decisions to model-patch resolution
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())


def collate_fn(examples):
    """Stack per-example pixel values and masks into model-ready batch tensors."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}


def main():
    # See all possible arguments by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Apply the SimMIM transforms and attach one random mask per image."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
74
0
"""Tests for BarkProcessor: tokenizer wrapping, save/load round-trips, and speaker-embedding (voice preset) handling."""
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class lowerCamelCase__(unittest.TestCase):  # class name kept as-is; unittest discovers via TestCase subclassing
    def setUp(self):
        # Fixture state read by every test below.
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        """Load the checkpoint's tokenizer, forwarding any extra kwargs (e.g. custom special tokens)."""
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        # A processor saved to disk must reload with an identical tokenizer vocab.
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        # Saving with speaker embeddings and reloading with extra tokenizer kwargs
        # must yield the same vocab as a tokenizer built with those kwargs directly.
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        # The processor's tokenization must match calling the tokenizer directly
        # with the processor's fixed padding settings.
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
706
"""Pre-train a Wav2Vec2-style model on an audio dataset with the masked-prediction / gumbel-softmax objective."""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union

import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn

# NOTE(review): upstream transformers spells these Wav2Vec2Config / Wav2Vec2FeatureExtractor /
# Wav2Vec2ForPreTraining — confirm these aliases exist in this project's fork.
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
    WavaVecaForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices


if is_apex_available():
    from apex import amp

# Native AMP (torch.cuda.amp) only exists from torch 1.6 on.
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config we are going to pre-train."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )


def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    """Set up stdout logging; DEBUG when verbose, INFO on the main process, WARNING otherwise."""
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to use for pre-training."""

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )


@dataclass
class DataCollatorForWavaVecaPretraining:
    """Pad a batch of raw-audio features and sample the time indices to mask for the pre-training objective."""

    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])

        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )

            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )

            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch


class WavaVecaPreTrainer(Trainer):
    """Trainer that decays the model's gumbel-softmax temperature after every update step."""

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """Perform one training step (with optional AMP/apex/deepspeed backward) and return the detached loss."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()


def main():
    """Parse CLI arguments, load and preprocess the audio dataset, then pre-train the model."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate; store the decoded
        # audio back onto the batch so downstream filtering/normalization can read it
        batch["speech"], batch["sampling_rate"] = librosa.load(
            batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate
        )
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = WavaVecaForPreTraining(config)

    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)

    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
74
0
"""Test utilities: environment flags, pytest skip marks/decorators, offline simulation, and cwd/Arrow-memory context managers."""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config


if config.PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


def parse_flag_from_env(key, default=False):
    """Read boolean env var `key` ("yes"/"no"-style via strtobool); fall back to `default` when unset."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None
    or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)


def require_faiss(test_case):
    """Skip `test_case` unless faiss is importable."""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    """Skip `test_case` unless the `regex` package is importable."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    """Skip `test_case` unless elasticsearch is importable."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    """Skip `test_case` unless sqlalchemy is importable."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    """Skip `test_case` unless PyTorch is available (per datasets.config)."""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    """Skip `test_case` unless TensorFlow is available (per datasets.config)."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    """Skip `test_case` unless JAX is available (per datasets.config)."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    """Skip `test_case` unless Pillow is available (per datasets.config)."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    """Skip `test_case` unless transformers is importable."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    """Skip `test_case` unless tiktoken is importable."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    """Skip `test_case` unless spacy is importable."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    """Decorator factory: skip the test unless spacy and the given spacy `model` are available."""

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    """Skip `test_case` unless pyspark is importable."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    """Skip `test_case` unless joblibspark is importable."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    """Skip `test_case` unless the RUN_SLOW env flag is enabled."""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    """Skip `test_case` unless the RUN_LOCAL env flag is enabled."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    """Skip `test_case` unless the RUN_PACKAGED env flag is enabled."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    """Skip `test_case` unless the RUN_REMOTE env flag is enabled."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    """Class decorator: apply every given decorator to each `test*` method of the class."""

    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate


class RequestWouldHangIndefinitelyError(Exception):
    """Raised by offline simulation when a request has no timeout and would hang forever."""

    pass


class OfflineSimulationMode(Enum):
    # How `offline()` below simulates the absence of a network connection.
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Simulate an offline environment for the duration of the context, per `mode`."""
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")


@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    """Run the context body inside a fresh temporary cwd; always restore the original cwd."""
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    """Assert that Arrow's allocated memory grows across the context body."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    """Assert that Arrow's allocated memory does not grow across the context body."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _a(rng1, rng2):
    """Return True if two NumPy ``Generator`` objects are in the same state.

    Each rng is deep-copied before drawing, so the originals are not advanced.
    NOTE(review): the mangled source declared both parameters with the same
    name (a SyntaxError); they are restored here as ``rng1``/``rng2``.
    """
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def _a(func):  # noqa: F811 -- shadows the helper above, as in the original file
    """Decorator: turn flaky HTTP 500/502 errors from the remote into pytest xfails."""
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            # Server-side hiccups should not fail the suite.
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)


class _RunOutput:
    """Result of a finished subprocess: return code plus captured output lines.

    Named ``_RunOutput`` because ``_stream_subprocess`` below instantiates it
    under that name (the mangled class name was never referenced and would
    have left a NameError at the construction site).
    """

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


# Backward-compatible alias for the mangled class name.
lowerCamelCase__ = _RunOutput


async def _read_stream(stream, callback):
    """Forward every line of ``stream`` to ``callback`` until EOF."""
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    """Run ``cmd`` asynchronously, teeing stdout/stderr live while capturing them.

    Returns a ``_RunOutput`` with the captured, decoded lines.
    """
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        # Decode, keep a copy for the caller, and optionally echo live.
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def _a(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):  # noqa: F811
    """Run ``cmd`` to completion and return its ``_RunOutput``.

    Raises RuntimeError if the command fails or produces no output at all
    (useful when the remote side is doing the actual testing).
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    """Return the numeric id of the current pytest-xdist worker (0 when not under xdist).

    Named to match its call site in the port helper below.
    """
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def _a():  # noqa: F811 -- final binding of the mangled name, as in the original
    """Derive a torch.distributed port unique per pytest-xdist worker."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
707
"""simple docstring""" from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class lowerCamelCase__ : def __init__( self ,A ,): UpperCAmelCase = parent UpperCAmelCase = 13 UpperCAmelCase = 7 UpperCAmelCase = 30 UpperCAmelCase = self.seq_length + self.mem_len UpperCAmelCase = 15 UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = 99 UpperCAmelCase = [10, 50, 80] UpperCAmelCase = 32 UpperCAmelCase = 32 UpperCAmelCase = 4 UpperCAmelCase = 8 UpperCAmelCase = 128 UpperCAmelCase = 2 UpperCAmelCase = 2 UpperCAmelCase = None UpperCAmelCase = 1 UpperCAmelCase = 0 UpperCAmelCase = 3 UpperCAmelCase = self.vocab_size - 1 UpperCAmelCase = 0.01 def _UpperCamelCase ( self ): UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase = None if self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase = TransfoXLConfig( vocab_size=self.vocab_size ,mem_len=self.mem_len ,clamp_len=self.clamp_len ,cutoffs=self.cutoffs ,d_model=self.hidden_size ,d_embed=self.d_embed ,n_head=self.num_attention_heads ,d_head=self.d_head ,d_inner=self.d_inner ,div_val=self.div_val ,n_layer=self.num_hidden_layers ,eos_token_id=self.eos_token_id ,pad_token_id=self.vocab_size - 1 ,init_range=self.init_range ,num_labels=self.num_labels ,) return (config, input_ids_a, input_ids_a, lm_labels) def _UpperCamelCase ( self ): 
random.seed(self.seed ) tf.random.set_seed(self.seed ) def _UpperCamelCase ( self ,A ,A ,A ,A ): UpperCAmelCase = TFTransfoXLModel(A ) UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple() UpperCAmelCase = {"""input_ids""": input_ids_a, """mems""": mems_a} UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple() self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) def _UpperCamelCase ( self ,A ,A ,A ,A ): UpperCAmelCase = TFTransfoXLLMHeadModel(A ) UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple() UpperCAmelCase = {"""input_ids""": input_ids_a, """labels""": lm_labels} UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple() UpperCAmelCase , UpperCAmelCase = model([input_ids_a, mems_a] ).to_tuple() UpperCAmelCase = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels} UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple() self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) def _UpperCamelCase ( self ,A ,A ,A ,A ): UpperCAmelCase = TFTransfoXLForSequenceClassification(A ) UpperCAmelCase = model(A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _UpperCamelCase ( self 
): UpperCAmelCase = self.prepare_config_and_inputs() ((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) = config_and_inputs UpperCAmelCase = {"""input_ids""": input_ids_a} return config, inputs_dict @require_tf class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) SCREAMING_SNAKE_CASE = () if is_tf_available() else () SCREAMING_SNAKE_CASE = ( { '''feature-extraction''': TFTransfoXLModel, '''text-classification''': TFTransfoXLForSequenceClassification, '''text-generation''': TFTransfoXLLMHeadModel, '''zero-shot''': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def _UpperCamelCase ( self ,A ,A ,A ,A ,A ): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def _UpperCamelCase ( self ): UpperCAmelCase = TFTransfoXLModelTester(self ) UpperCAmelCase = ConfigTester(self ,config_class=A ,d_embed=37 ) def _UpperCamelCase ( self ): self.config_tester.run_common_tests() def _UpperCamelCase ( self ): self.model_tester.set_seed() UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*A ) def _UpperCamelCase ( self ): self.model_tester.set_seed() UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*A ) def _UpperCamelCase ( self ): UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: UpperCAmelCase = model_class(A ) assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: UpperCAmelCase = model.get_output_embeddings() assert isinstance(A ,tf.keras.layers.Layer ) UpperCAmelCase = model.get_bias() assert name is None else: UpperCAmelCase = model.get_output_embeddings() assert x is None UpperCAmelCase = model.get_bias() assert name is None def _UpperCamelCase ( self ): # TODO JP: Make TransfoXL XLA compliant pass @slow def _UpperCamelCase ( self ): for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase = TFTransfoXLModel.from_pretrained(A ) self.assertIsNotNone(A ) @unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" ) def _UpperCamelCase ( self ): pass @require_tf class lowerCamelCase__ ( unittest.TestCase ): @unittest.skip("""Skip test until #12651 is resolved.""" ) @slow def _UpperCamelCase ( self ): UpperCAmelCase = 
TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" ) # fmt: off UpperCAmelCase = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] ,dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . 
<eod> </s> <eos> # fmt: off UpperCAmelCase = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> UpperCAmelCase = model.generate(A ,max_length=200 ,do_sample=A ) self.assertListEqual(output_ids[0].numpy().tolist() ,A )
74
0
"""simple docstring""" import torch from diffusers import StableDiffusionPipeline _UpperCamelCase = """path-to-your-trained-model""" _UpperCamelCase = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""") _UpperCamelCase = """A photo of sks dog in a bucket""" _UpperCamelCase = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] image.save("""dog-bucket.png""")
708
"""simple docstring""" from math import sqrt def _a ( _snake_case = 100_0000 ): """simple docstring""" UpperCAmelCase = 0 UpperCAmelCase = 0 UpperCAmelCase = 42 while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(_snake_case , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(F"""{solution() = }""")
74
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { """s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""", } class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = '''open-llama''' def __init__( self ,A=100_000 ,A=4_096 ,A=11_008 ,A=32 ,A=32 ,A="silu" ,A=2_048 ,A=0.02 ,A=1e-6 ,A=True ,A=0 ,A=1 ,A=2 ,A=False ,A=True ,A=0.1 ,A=0.1 ,A=True ,A=True ,A=None ,**A ,): UpperCAmelCase = vocab_size UpperCAmelCase = max_position_embeddings UpperCAmelCase = hidden_size UpperCAmelCase = intermediate_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = hidden_act UpperCAmelCase = initializer_range UpperCAmelCase = rms_norm_eps UpperCAmelCase = use_cache UpperCAmelCase = kwargs.pop( """use_memorry_efficient_attention""" ,A ) UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_dropout_prob UpperCAmelCase = use_stable_embedding UpperCAmelCase = shared_input_output_embedding UpperCAmelCase = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=A ,bos_token_id=A ,eos_token_id=A ,tie_word_embeddings=A ,**A ,) def _UpperCamelCase ( self ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling ,A ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ F'''got {self.rope_scaling}''' ) UpperCAmelCase = self.rope_scaling.get("""type""" ,A ) UpperCAmelCase = self.rope_scaling.get("""factor""" ,A ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(A ,A ) or rope_scaling_factor <= 1.0: raise ValueError(F'''`rope_scaling`\'s factor field must be an float 
> 1, got {rope_scaling_factor}''' )
709
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel _UpperCamelCase = { """text_branch""": """text_model""", """audio_branch""": """audio_model.audio_encoder""", """attn""": """attention.self""", """self.proj""": """output.dense""", """attention.self_mask""": """attn_mask""", """mlp.fc1""": """intermediate.dense""", """mlp.fc2""": """output.dense""", """norm1""": """layernorm_before""", """norm2""": """layernorm_after""", """bn0""": """batch_norm""", } _UpperCamelCase = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""") def _a ( _snake_case , _snake_case=False ): """simple docstring""" UpperCAmelCase , UpperCAmelCase = create_model( """HTSAT-tiny""" , """roberta""" , _snake_case , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=_snake_case , fusion_type="""aff_2d""" if enable_fusion else None , ) return model, model_cfg def _a ( _snake_case ): """simple docstring""" UpperCAmelCase = {} UpperCAmelCase = R""".*sequential.(\d+).*""" UpperCAmelCase = R""".*_projection.(\d+).*""" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: UpperCAmelCase = key.replace(_snake_case , _snake_case ) if re.match(_snake_case , _snake_case ): # replace sequential layers with list UpperCAmelCase = re.match(_snake_case , _snake_case ).group(1 ) UpperCAmelCase = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(_snake_case )//3}.linear.''' ) elif re.match(_snake_case , _snake_case ): UpperCAmelCase = int(re.match(_snake_case , _snake_case ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
UpperCAmelCase = 1 if projecton_layer == 0 else 2 UpperCAmelCase = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' ) if "audio" and "qkv" in key: # split qkv into query key and value UpperCAmelCase = value UpperCAmelCase = mixed_qkv.size(0 ) // 3 UpperCAmelCase = mixed_qkv[:qkv_dim] UpperCAmelCase = mixed_qkv[qkv_dim : qkv_dim * 2] UpperCAmelCase = mixed_qkv[qkv_dim * 2 :] UpperCAmelCase = query_layer UpperCAmelCase = key_layer UpperCAmelCase = value_layer else: UpperCAmelCase = value return model_state_dict def _a ( _snake_case , _snake_case , _snake_case , _snake_case=False ): """simple docstring""" UpperCAmelCase , UpperCAmelCase = init_clap(_snake_case , enable_fusion=_snake_case ) clap_model.eval() UpperCAmelCase = clap_model.state_dict() UpperCAmelCase = rename_state_dict(_snake_case ) UpperCAmelCase = ClapConfig() UpperCAmelCase = enable_fusion UpperCAmelCase = ClapModel(_snake_case ) # ignore the spectrogram embedding layer model.load_state_dict(_snake_case , strict=_snake_case ) model.save_pretrained(_snake_case ) transformers_config.save_pretrained(_snake_case ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""") _UpperCamelCase = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
74
0
"""simple docstring""" from __future__ import annotations _UpperCamelCase = tuple[int, int, int] _UpperCamelCase = tuple[str, str, str] # used alphabet -------------------------- # from string.ascii_uppercase _UpperCamelCase = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" # -------------------------- default selection -------------------------- # rotors -------------------------- _UpperCamelCase = """EGZWVONAHDCLFQMSIPJBYUKXTR""" _UpperCamelCase = """FOBHMDKEXQNRAULPGSJVTYICZW""" _UpperCamelCase = """ZJXESIUQLHAVRMDOYGTNFWPBKC""" # reflector -------------------------- _UpperCamelCase = { """A""": """N""", """N""": """A""", """B""": """O""", """O""": """B""", """C""": """P""", """P""": """C""", """D""": """Q""", """Q""": """D""", """E""": """R""", """R""": """E""", """F""": """S""", """S""": """F""", """G""": """T""", """T""": """G""", """H""": """U""", """U""": """H""", """I""": """V""", """V""": """I""", """J""": """W""", """W""": """J""", """K""": """X""", """X""": """K""", """L""": """Y""", """Y""": """L""", """M""": """Z""", """Z""": """M""", } # -------------------------- extra rotors -------------------------- _UpperCamelCase = """RMDJXFUWGISLHVTCQNKYPBEZOA""" _UpperCamelCase = """SGLCPQWZHKXAREONTFBVIYJUDM""" _UpperCamelCase = """HVSICLTYKQUBXDWAJZOMFGPREN""" _UpperCamelCase = """RZWQHFMVDBKICJLNTUXAGYPSOE""" _UpperCamelCase = """LFKIJODBEGAMQPXVUHYSTCZRWN""" _UpperCamelCase = """KOAEGVDHXPQZMLFTYWJNBRCIUS""" def _a ( _snake_case , _snake_case , _snake_case ): """simple docstring""" if (unique_rotsel := len(set(_snake_case ) )) < 3: UpperCAmelCase = F'''Please use 3 unique rotors (not {unique_rotsel})''' raise Exception(_snake_case ) # Checks if rotor positions are valid UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = rotpos if not 0 < rotorposa <= len(_snake_case ): UpperCAmelCase = F'''First rotor position is not within range of 1..26 ({rotorposa}''' raise ValueError(_snake_case ) if not 0 < rotorposa <= len(_snake_case ): UpperCAmelCase = F'''Second rotor position 
is not within range of 1..26 ({rotorposa})''' raise ValueError(_snake_case ) if not 0 < rotorposa <= len(_snake_case ): UpperCAmelCase = F'''Third rotor position is not within range of 1..26 ({rotorposa})''' raise ValueError(_snake_case ) # Validates string and returns dict UpperCAmelCase = _plugboard(_snake_case ) return rotpos, rotsel, pbdict def _a ( _snake_case ): """simple docstring""" if not isinstance(_snake_case , _snake_case ): UpperCAmelCase = F'''Plugboard setting isn\'t type string ({type(_snake_case )})''' raise TypeError(_snake_case ) elif len(_snake_case ) % 2 != 0: UpperCAmelCase = F'''Odd number of symbols ({len(_snake_case )})''' raise Exception(_snake_case ) elif pbstring == "": return {} pbstring.replace(""" """ , """""" ) # Checks if all characters are unique UpperCAmelCase = set() for i in pbstring: if i not in abc: UpperCAmelCase = F'''\'{i}\' not in list of symbols''' raise Exception(_snake_case ) elif i in tmppbl: UpperCAmelCase = F'''Duplicate symbol ({i})''' raise Exception(_snake_case ) else: tmppbl.add(_snake_case ) del tmppbl # Created the dictionary UpperCAmelCase = {} for j in range(0 , len(_snake_case ) - 1 , 2 ): UpperCAmelCase = pbstring[j + 1] UpperCAmelCase = pbstring[j] return pb def _a ( _snake_case , _snake_case , _snake_case = (rotora, rotora, rotora) , _snake_case = "" , ): """simple docstring""" UpperCAmelCase = text.upper() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = _validator( _snake_case , _snake_case , plugb.upper() ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = rotor_position UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = rotor_selection rotorposa -= 1 rotorposa -= 1 rotorposa -= 1 UpperCAmelCase = [] # encryption/decryption process -------------------------- for symbol in text: if symbol in abc: # 1st plugboard -------------------------- if symbol in plugboard: UpperCAmelCase = plugboard[symbol] # rotor ra -------------------------- UpperCAmelCase = abc.index(_snake_case ) + rotorposa UpperCAmelCase 
= rotora[index % len(_snake_case )] # rotor rb -------------------------- UpperCAmelCase = abc.index(_snake_case ) + rotorposa UpperCAmelCase = rotora[index % len(_snake_case )] # rotor rc -------------------------- UpperCAmelCase = abc.index(_snake_case ) + rotorposa UpperCAmelCase = rotora[index % len(_snake_case )] # reflector -------------------------- # this is the reason you don't need another machine to decipher UpperCAmelCase = reflector[symbol] # 2nd rotors UpperCAmelCase = abc[rotora.index(_snake_case ) - rotorposa] UpperCAmelCase = abc[rotora.index(_snake_case ) - rotorposa] UpperCAmelCase = abc[rotora.index(_snake_case ) - rotorposa] # 2nd plugboard if symbol in plugboard: UpperCAmelCase = plugboard[symbol] # moves/resets rotor positions rotorposa += 1 if rotorposa >= len(_snake_case ): UpperCAmelCase = 0 rotorposa += 1 if rotorposa >= len(_snake_case ): UpperCAmelCase = 0 rotorposa += 1 if rotorposa >= len(_snake_case ): UpperCAmelCase = 0 # else: # pass # Error could be also raised # raise ValueError( # 'Invalid symbol('+repr(symbol)+')') result.append(_snake_case ) return "".join(_snake_case ) if __name__ == "__main__": _UpperCamelCase = """This is my Python script that emulates the Enigma machine from WWII.""" _UpperCamelCase = (1, 1, 1) _UpperCamelCase = """pictures""" _UpperCamelCase = (rotora, rotora, rotora) _UpperCamelCase = enigma(message, rotor_pos, rotor_sel, pb) print("""Encrypted message:""", en) print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
710
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. _UpperCamelCase = abspath(join(dirname(dirname(__file__)), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def _a ( _snake_case ): """simple docstring""" from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(_snake_case ) def _a ( _snake_case ): """simple docstring""" from diffusers.utils.testing_utils import pytest_terminal_summary_main UpperCAmelCase = terminalreporter.config.getoption("""--make-reports""" ) if make_reports: pytest_terminal_summary_main(_snake_case , id=_snake_case )
74
0
"""simple docstring""" def _a ( _snake_case , _snake_case ): """simple docstring""" UpperCAmelCase = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def _a ( _snake_case , _snake_case , _snake_case ): """simple docstring""" UpperCAmelCase = 0 while b > 0: if b & 1: UpperCAmelCase = ((res % c) + (a % c)) % c a += a b >>= 1 return res
711
"""simple docstring""" from __future__ import annotations from collections.abc import MutableSequence class lowerCamelCase__ : def __init__( self ,A ,A ): if len(A ) != degree + 1: raise ValueError( """The number of coefficients should be equal to the degree + 1.""" ) UpperCAmelCase = list(A ) UpperCAmelCase = degree def __add__( self ,A ): if self.degree > polynomial_a.degree: UpperCAmelCase = self.coefficients[:] for i in range(polynomial_a.degree + 1 ): coefficients[i] += polynomial_a.coefficients[i] return Polynomial(self.degree ,A ) else: UpperCAmelCase = polynomial_a.coefficients[:] for i in range(self.degree + 1 ): coefficients[i] += self.coefficients[i] return Polynomial(polynomial_a.degree ,A ) def __sub__( self ,A ): return self + polynomial_a * Polynomial(0 ,[-1] ) def __neg__( self ): return Polynomial(self.degree ,[-c for c in self.coefficients] ) def __mul__( self ,A ): UpperCAmelCase = [0] * (self.degree + polynomial_a.degree + 1) for i in range(self.degree + 1 ): for j in range(polynomial_a.degree + 1 ): coefficients[i + j] += ( self.coefficients[i] * polynomial_a.coefficients[j] ) return Polynomial(self.degree + polynomial_a.degree ,A ) def _UpperCamelCase ( self ,A ): UpperCAmelCase = 0 for i in range(self.degree + 1 ): result += self.coefficients[i] * (substitution**i) return result def __str__( self ): UpperCAmelCase = """""" for i in range(self.degree ,-1 ,-1 ): if self.coefficients[i] == 0: continue elif self.coefficients[i] > 0: if polynomial: polynomial += " + " else: polynomial += " - " if i == 0: polynomial += str(abs(self.coefficients[i] ) ) elif i == 1: polynomial += str(abs(self.coefficients[i] ) ) + "x" else: polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(A ) return polynomial def __repr__( self ): return self.__str__() def _UpperCamelCase ( self ): UpperCAmelCase = [0] * self.degree for i in range(self.degree ): UpperCAmelCase = self.coefficients[i + 1] * (i + 1) return Polynomial(self.degree - 1 ,A ) def _UpperCamelCase 
( self ,A = 0 ): UpperCAmelCase = [0] * (self.degree + 2) UpperCAmelCase = constant for i in range(self.degree + 1 ): UpperCAmelCase = self.coefficients[i] / (i + 1) return Polynomial(self.degree + 1 ,A ) def __eq__( self ,A ): if not isinstance(A ,A ): return False if self.degree != polynomial_a.degree: return False for i in range(self.degree + 1 ): if self.coefficients[i] != polynomial_a.coefficients[i]: return False return True def __ne__( self ,A ): return not self.__eq__(A )
74
0
"""simple docstring""" import copy import os from typing import TYPE_CHECKING, List, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { """kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""", } class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = '''align_text_model''' def __init__( self ,A=30_522 ,A=768 ,A=12 ,A=12 ,A=3_072 ,A="gelu" ,A=0.1 ,A=0.1 ,A=512 ,A=2 ,A=0.02 ,A=1e-1_2 ,A=0 ,A="absolute" ,A=True ,**A ,): super().__init__(**A ) UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = hidden_act UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = type_vocab_size UpperCAmelCase = initializer_range UpperCAmelCase = layer_norm_eps UpperCAmelCase = position_embedding_type UpperCAmelCase = use_cache UpperCAmelCase = pad_token_id @classmethod def _UpperCamelCase ( cls ,A ,**A ): cls._set_token_in_kwargs(A ) UpperCAmelCase , UpperCAmelCase = cls.get_config_dict(A ,**A ) # get the text config dict if we are loading from AlignConfig if config_dict.get("""model_type""" ) == "align": UpperCAmelCase = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(A ,**A ) class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = '''align_vision_model''' def __init__( self ,A = 3 ,A = 600 ,A = 2.0 ,A = 3.1 ,A = 8 ,A = [3, 3, 5, 3, 5, 5, 3] ,A = [32, 16, 24, 40, 80, 112, 192] ,A = [16, 24, 40, 80, 112, 192, 320] ,A = [] ,A = [1, 2, 2, 2, 1, 2, 1] ,A = [1, 2, 2, 3, 3, 4, 1] ,A = [1, 6, 6, 6, 6, 6, 6] ,A = 0.25 ,A = "swish" ,A = 2_560 ,A = "mean" ,A = 0.02 ,A = 0.001 ,A = 0.99 ,A = 0.2 ,**A ,): super().__init__(**A ) UpperCAmelCase = num_channels UpperCAmelCase = image_size UpperCAmelCase = width_coefficient UpperCAmelCase = depth_coefficient UpperCAmelCase = depth_divisor UpperCAmelCase = kernel_sizes UpperCAmelCase = in_channels UpperCAmelCase = out_channels UpperCAmelCase = depthwise_padding UpperCAmelCase = strides UpperCAmelCase = num_block_repeats UpperCAmelCase = expand_ratios UpperCAmelCase = squeeze_expansion_ratio UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dim UpperCAmelCase = pooling_type UpperCAmelCase = initializer_range UpperCAmelCase = batch_norm_eps UpperCAmelCase = batch_norm_momentum UpperCAmelCase = drop_connect_rate UpperCAmelCase = sum(A ) * 4 @classmethod def _UpperCamelCase ( cls ,A ,**A ): cls._set_token_in_kwargs(A ) UpperCAmelCase , UpperCAmelCase = cls.get_config_dict(A ,**A ) # get the vision config dict if we are loading from AlignConfig if config_dict.get("""model_type""" ) == "align": UpperCAmelCase = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(A ,**A ) class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = '''align''' SCREAMING_SNAKE_CASE = True def __init__( self ,A=None ,A=None ,A=640 ,A=1.0 ,A=0.02 ,**A ,): super().__init__(**A ) if text_config is None: UpperCAmelCase = {} logger.info("""text_config is None. Initializing the AlignTextConfig with default values.""" ) if vision_config is None: UpperCAmelCase = {} logger.info("""vision_config is None. Initializing the AlignVisionConfig with default values.""" ) UpperCAmelCase = AlignTextConfig(**A ) UpperCAmelCase = AlignVisionConfig(**A ) UpperCAmelCase = projection_dim UpperCAmelCase = temperature_init_value UpperCAmelCase = initializer_range @classmethod def _UpperCamelCase ( cls ,A ,A ,**A ): return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**A ) def _UpperCamelCase ( self ): UpperCAmelCase = copy.deepcopy(self.__dict__ ) UpperCAmelCase = self.text_config.to_dict() UpperCAmelCase = self.vision_config.to_dict() UpperCAmelCase = self.__class__.model_type return output
712
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class lowerCamelCase__ : def __init__( self ,A ,): UpperCAmelCase = parent UpperCAmelCase = 13 UpperCAmelCase = 7 UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = 2 UpperCAmelCase = 99 UpperCAmelCase = 0 UpperCAmelCase = 32 UpperCAmelCase = 2 UpperCAmelCase = 4 UpperCAmelCase = 0.1 UpperCAmelCase = 0.1 UpperCAmelCase = 512 UpperCAmelCase = 16 UpperCAmelCase = 2 UpperCAmelCase = 0.02 UpperCAmelCase = 3 UpperCAmelCase = 4 UpperCAmelCase = """last""" UpperCAmelCase = True UpperCAmelCase = None UpperCAmelCase = 0 def _UpperCamelCase ( self ): UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ,dtype=tf.floataa ) UpperCAmelCase = None if self.use_input_lengths: UpperCAmelCase = ( ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length UpperCAmelCase = None if self.use_token_type_ids: UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs ) UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = None if 
self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) UpperCAmelCase = ids_tensor([self.batch_size] ,2 ,dtype=tf.floataa ) UpperCAmelCase = ids_tensor([self.batch_size] ,self.num_choices ) UpperCAmelCase = FlaubertConfig( vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,bos_token_id=self.bos_token_id ,) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = TFFlaubertModel(config=A ) UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids} UpperCAmelCase = model(A ) UpperCAmelCase = [input_ids, input_mask] UpperCAmelCase = model(A ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = TFFlaubertWithLMHeadModel(A ) UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids} UpperCAmelCase = model(A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = TFFlaubertForQuestionAnsweringSimple(A ) UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths} 
UpperCAmelCase = model(A ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = TFFlaubertForSequenceClassification(A ) UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths} UpperCAmelCase = model(A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = self.num_labels UpperCAmelCase = TFFlaubertForTokenClassification(config=A ) UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} UpperCAmelCase = model(A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,): UpperCAmelCase = self.num_choices UpperCAmelCase = TFFlaubertForMultipleChoice(config=A ) UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) ) UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) ) UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) ) UpperCAmelCase = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } UpperCAmelCase = model(A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _UpperCamelCase ( self ): UpperCAmelCase = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) = config_and_inputs UpperCAmelCase = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """langs""": token_type_ids, """lengths""": 
input_lengths, } return config, inputs_dict @require_tf class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable SCREAMING_SNAKE_CASE = ( { '''feature-extraction''': TFFlaubertModel, '''fill-mask''': TFFlaubertWithLMHeadModel, '''question-answering''': TFFlaubertForQuestionAnsweringSimple, '''text-classification''': TFFlaubertForSequenceClassification, '''token-classification''': TFFlaubertForTokenClassification, '''zero-shot''': TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def _UpperCamelCase ( self ,A ,A ,A ,A ,A ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _UpperCamelCase ( self ): UpperCAmelCase = TFFlaubertModelTester(self ) UpperCAmelCase = ConfigTester(self ,config_class=A ,emb_dim=37 ) def _UpperCamelCase ( self ): self.config_tester.run_common_tests() def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*A ) @slow def _UpperCamelCase ( self ): for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase = TFFlaubertModel.from_pretrained(A ) self.assertIsNotNone(A ) @require_tf @require_sentencepiece @require_tokenizers class lowerCamelCase__ ( unittest.TestCase ): @slow def _UpperCamelCase ( self ): UpperCAmelCase = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" ) UpperCAmelCase = tf.convert_to_tensor( [[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] ,dtype=tf.intaa ,) # "J'aime flaubert !" 
UpperCAmelCase = model(A )[0] UpperCAmelCase = tf.TensorShape((1, 8, 512) ) self.assertEqual(output.shape ,A ) # compare the actual values for a slice. UpperCAmelCase = tf.convert_to_tensor( [ [ [-1.8768773, -1.566555, 0.27072418], [-1.6920038, -0.5873505, 1.9329599], [-2.9563985, -1.6993835, 1.7972052], ] ] ,dtype=tf.floataa ,) self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
74
0
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class lowerCamelCase__ ( unittest.TestCase ): SCREAMING_SNAKE_CASE = ViTImageProcessor if is_vision_available() else None @property def _UpperCamelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def _UpperCamelCase ( self ): UpperCAmelCase = (3, 32, 128) UpperCAmelCase = tempfile.mkdtemp() # fmt: off UpperCAmelCase = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on UpperCAmelCase = dict(zip(A ,range(len(A ) ) ) ) UpperCAmelCase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) UpperCAmelCase = { """do_normalize""": False, """do_resize""": True, """image_processor_type""": """ViTImageProcessor""", """resample""": 3, """size""": {"""height""": 32, """width""": 128}, } UpperCAmelCase = os.path.join(self.tmpdirname ,A ) with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp: json.dump(A ,A ) def _UpperCamelCase ( self ,**A ): return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**A ) def _UpperCamelCase ( self ,**A ): 
return ViTImageProcessor.from_pretrained(self.tmpdirname ,**A ) def _UpperCamelCase ( self ): shutil.rmtree(self.tmpdirname ) def _UpperCamelCase ( self ): UpperCAmelCase = np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta ) UpperCAmelCase = Image.fromarray(np.moveaxis(A ,0 ,-1 ) ) return image_input def _UpperCamelCase ( self ): UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_image_processor() UpperCAmelCase = MgpstrProcessor(tokenizer=A ,image_processor=A ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname ,use_fast=A ) self.assertEqual(processor.char_tokenizer.get_vocab() ,tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer ,A ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor ,A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_image_processor() UpperCAmelCase = MgpstrProcessor(tokenizer=A ,image_processor=A ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) UpperCAmelCase = self.get_image_processor(do_normalize=A ,padding_value=1.0 ) UpperCAmelCase = MgpstrProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=A ,padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer ,A ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.get_image_processor() UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = MgpstrProcessor(tokenizer=A ,image_processor=A ) UpperCAmelCase = self.prepare_image_inputs() UpperCAmelCase = image_processor(A 
,return_tensors="""np""" ) UpperCAmelCase = processor(images=A ,return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _UpperCamelCase ( self ): UpperCAmelCase = self.get_image_processor() UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = MgpstrProcessor(tokenizer=A ,image_processor=A ) UpperCAmelCase = """test""" UpperCAmelCase = processor(text=A ) UpperCAmelCase = tokenizer(A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _UpperCamelCase ( self ): UpperCAmelCase = self.get_image_processor() UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = MgpstrProcessor(tokenizer=A ,image_processor=A ) UpperCAmelCase = """test""" UpperCAmelCase = self.prepare_image_inputs() UpperCAmelCase = processor(text=A ,images=A ) self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """labels"""] ) # test if it raises when no input is passed with pytest.raises(A ): processor() def _UpperCamelCase ( self ): UpperCAmelCase = self.get_image_processor() UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = MgpstrProcessor(tokenizer=A ,image_processor=A ) UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] UpperCAmelCase = processor.char_decode(A ) UpperCAmelCase = tokenizer.batch_decode(A ) UpperCAmelCase = [seq.replace(""" """ ,"""""" ) for seq in decoded_tok] self.assertListEqual(A ,A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.get_image_processor() UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = MgpstrProcessor(tokenizer=A ,image_processor=A ) UpperCAmelCase = None UpperCAmelCase = self.prepare_image_inputs() UpperCAmelCase = processor(text=A ,images=A ) self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names ) def _UpperCamelCase ( self ): UpperCAmelCase = self.get_image_processor() UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = 
MgpstrProcessor(tokenizer=A ,image_processor=A ) UpperCAmelCase = torch.randn(1 ,27 ,38 ) UpperCAmelCase = torch.randn(1 ,27 ,50_257 ) UpperCAmelCase = torch.randn(1 ,27 ,30_522 ) UpperCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) ,["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
713
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) def _a ( _snake_case ): """simple docstring""" UpperCAmelCase = OrderedDict() for key, value in state_dict.items(): if key.startswith("""module.encoder""" ): UpperCAmelCase = key.replace("""module.encoder""" , """glpn.encoder""" ) if key.startswith("""module.decoder""" ): UpperCAmelCase = key.replace("""module.decoder""" , """decoder.stages""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 UpperCAmelCase = key[key.find("""patch_embed""" ) + len("""patch_embed""" )] UpperCAmelCase = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(_snake_case )-1}''' ) if "norm" in key: UpperCAmelCase = key.replace("""norm""" , """layer_norm""" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 UpperCAmelCase = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )] UpperCAmelCase = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(_snake_case )-1}''' ) if "layer_norm1" in key: UpperCAmelCase = key.replace("""layer_norm1""" , """layer_norm_1""" ) if "layer_norm2" in key: UpperCAmelCase = key.replace("""layer_norm2""" , """layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 UpperCAmelCase = key[key.find("""block""" ) + len("""block""" )] UpperCAmelCase = key.replace(F'''block{idx}''' , F'''block.{int(_snake_case )-1}''' ) if "attn.q" in key: UpperCAmelCase = key.replace("""attn.q""" , """attention.self.query""" ) if "attn.proj" in key: UpperCAmelCase = key.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in key: UpperCAmelCase = key.replace("""attn""" , """attention.self""" ) if 
"fc1" in key: UpperCAmelCase = key.replace("""fc1""" , """dense1""" ) if "fc2" in key: UpperCAmelCase = key.replace("""fc2""" , """dense2""" ) if "linear_pred" in key: UpperCAmelCase = key.replace("""linear_pred""" , """classifier""" ) if "linear_fuse" in key: UpperCAmelCase = key.replace("""linear_fuse.conv""" , """linear_fuse""" ) UpperCAmelCase = key.replace("""linear_fuse.bn""" , """batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 UpperCAmelCase = key[key.find("""linear_c""" ) + len("""linear_c""" )] UpperCAmelCase = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(_snake_case )-1}''' ) if "bot_conv" in key: UpperCAmelCase = key.replace("""bot_conv""" , """0.convolution""" ) if "skip_conv1" in key: UpperCAmelCase = key.replace("""skip_conv1""" , """1.convolution""" ) if "skip_conv2" in key: UpperCAmelCase = key.replace("""skip_conv2""" , """2.convolution""" ) if "fusion1" in key: UpperCAmelCase = key.replace("""fusion1""" , """1.fusion""" ) if "fusion2" in key: UpperCAmelCase = key.replace("""fusion2""" , """2.fusion""" ) if "fusion3" in key: UpperCAmelCase = key.replace("""fusion3""" , """3.fusion""" ) if "fusion" in key and "conv" in key: UpperCAmelCase = key.replace("""conv""" , """convolutional_layer""" ) if key.startswith("""module.last_layer_depth""" ): UpperCAmelCase = key.replace("""module.last_layer_depth""" , """head.head""" ) UpperCAmelCase = value return new_state_dict def _a ( _snake_case , _snake_case ): """simple docstring""" for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) UpperCAmelCase = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' ) UpperCAmelCase = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict UpperCAmelCase = kv_weight[ : config.hidden_sizes[i], : ] 
UpperCAmelCase = kv_bias[: config.hidden_sizes[i]] UpperCAmelCase = kv_weight[ config.hidden_sizes[i] :, : ] UpperCAmelCase = kv_bias[config.hidden_sizes[i] :] def _a ( ): """simple docstring""" UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ) return image @torch.no_grad() def _a ( _snake_case , _snake_case , _snake_case=False , _snake_case=None ): """simple docstring""" UpperCAmelCase = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) UpperCAmelCase = GLPNImageProcessor() # prepare image UpperCAmelCase = prepare_img() UpperCAmelCase = image_processor(images=_snake_case , return_tensors="""pt""" ).pixel_values logger.info("""Converting model...""" ) # load original state dict UpperCAmelCase = torch.load(_snake_case , map_location=torch.device("""cpu""" ) ) # rename keys UpperCAmelCase = rename_keys(_snake_case ) # key and value matrices need special treatment read_in_k_v(_snake_case , _snake_case ) # create HuggingFace model and load state dict UpperCAmelCase = GLPNForDepthEstimation(_snake_case ) model.load_state_dict(_snake_case ) model.eval() # forward pass UpperCAmelCase = model(_snake_case ) UpperCAmelCase = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: UpperCAmelCase = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: UpperCAmelCase = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(F'''Unknown model name: {model_name}''' ) UpperCAmelCase = torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , _snake_case , atol=1E-4 ) print("""Looks ok!""" ) # finally, push to hub if required if push_to_hub: 
logger.info("""Pushing model and image processor to the hub...""" ) model.push_to_hub( repo_path_or_name=Path(_snake_case , _snake_case ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=_snake_case , ) image_processor.push_to_hub( repo_path_or_name=Path(_snake_case , _snake_case ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=_snake_case , ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) parser.add_argument( """--model_name""", default="""glpn-kitti""", type=str, help="""Name of the model in case you're pushing to the hub.""", ) _UpperCamelCase = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
74
0
"""simple docstring""" from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig _UpperCamelCase = { """susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""", """susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""", } class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = '''ernie_m''' SCREAMING_SNAKE_CASE = {'''dropout''': '''classifier_dropout''', '''num_classes''': '''num_labels'''} def __init__( self ,A = 250_002 ,A = 768 ,A = 12 ,A = 12 ,A = 3_072 ,A = "gelu" ,A = 0.1 ,A = 0.1 ,A = 514 ,A = 0.02 ,A = 1 ,A = 1e-0_5 ,A=None ,A=False ,A=0.0 ,**A ,): super().__init__(pad_token_id=A ,**A ) UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = initializer_range UpperCAmelCase = layer_norm_eps UpperCAmelCase = classifier_dropout UpperCAmelCase = is_decoder UpperCAmelCase = act_dropout
714
"""simple docstring""" def _a ( _snake_case ): # noqa: E741 """simple docstring""" UpperCAmelCase = len(_snake_case ) UpperCAmelCase = 0 UpperCAmelCase = [0] * n UpperCAmelCase = [False] * n UpperCAmelCase = [False] * n def dfs(_snake_case , _snake_case , _snake_case , _snake_case ): if parent == root: out_edge_count += 1 UpperCAmelCase = True UpperCAmelCase = at for to in l[at]: if to == parent: pass elif not visited[to]: UpperCAmelCase = dfs(_snake_case , _snake_case , _snake_case , _snake_case ) UpperCAmelCase = min(low[at] , low[to] ) # AP found via bridge if at < low[to]: UpperCAmelCase = True # AP found via cycle if at == low[to]: UpperCAmelCase = True else: UpperCAmelCase = min(low[at] , _snake_case ) return out_edge_count for i in range(_snake_case ): if not visited[i]: UpperCAmelCase = 0 UpperCAmelCase = dfs(_snake_case , _snake_case , -1 , _snake_case ) UpperCAmelCase = out_edge_count > 1 for x in range(len(_snake_case ) ): if is_art[x] is True: print(_snake_case ) # Adjacency list of graph _UpperCamelCase = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], } compute_ap(data)
74
0
"""simple docstring""" class lowerCamelCase__ : def __init__( self ): UpperCAmelCase = {} # Mapping from char to TrieNode UpperCAmelCase = False def _UpperCamelCase ( self ,A ): for word in words: self.insert(A ) def _UpperCamelCase ( self ,A ): UpperCAmelCase = self for char in word: if char not in curr.nodes: UpperCAmelCase = TrieNode() UpperCAmelCase = curr.nodes[char] UpperCAmelCase = True def _UpperCamelCase ( self ,A ): UpperCAmelCase = self for char in word: if char not in curr.nodes: return False UpperCAmelCase = curr.nodes[char] return curr.is_leaf def _UpperCamelCase ( self ,A ): def _delete(A ,A ,A ) -> bool: if index == len(A ): # If word does not exist if not curr.is_leaf: return False UpperCAmelCase = False return len(curr.nodes ) == 0 UpperCAmelCase = word[index] UpperCAmelCase = curr.nodes.get(A ) # If char not in current trie node if not char_node: return False # Flag to check if node can be deleted UpperCAmelCase = _delete(A ,A ,index + 1 ) if delete_curr: del curr.nodes[char] return len(curr.nodes ) == 0 return delete_curr _delete(self ,A ,0 ) def _a ( _snake_case , _snake_case ): """simple docstring""" if node.is_leaf: print(_snake_case , end=""" """ ) for key, value in node.nodes.items(): print_words(_snake_case , word + key ) def _a ( ): """simple docstring""" UpperCAmelCase = """banana bananas bandana band apple all beast""".split() UpperCAmelCase = TrieNode() root.insert_many(_snake_case ) # print_words(root, "") assert all(root.find(_snake_case ) for word in words ) assert root.find("""banana""" ) assert not root.find("""bandanas""" ) assert not root.find("""apps""" ) assert root.find("""apple""" ) assert root.find("""all""" ) root.delete("""all""" ) assert not root.find("""all""" ) root.delete("""banana""" ) assert not root.find("""banana""" ) assert root.find("""bananas""" ) return True def _a ( _snake_case , _snake_case ): """simple docstring""" print(str(_snake_case ) , """works!""" if passes else """doesn't work :(""" ) def _a ( ): 
"""simple docstring""" assert test_trie() def _a ( ): """simple docstring""" print_results("""Testing trie functionality""" , test_trie() ) if __name__ == "__main__": main()
715
"""simple docstring""" _UpperCamelCase = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ _UpperCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}] _UpperCamelCase = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
74
0
"""simple docstring""" def _a ( _snake_case ): """simple docstring""" if not isinstance(_snake_case , _snake_case ): raise ValueError("""Input must be an integer""" ) if input_num <= 0: raise ValueError("""Input must be positive""" ) return sum( divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
716
"""simple docstring""" import argparse import struct import unittest class lowerCamelCase__ : def __init__( self ,A ): UpperCAmelCase = data # Initialize hash values UpperCAmelCase = [ 0x6A_09_E6_67, 0xBB_67_AE_85, 0x3C_6E_F3_72, 0xA5_4F_F5_3A, 0x51_0E_52_7F, 0x9B_05_68_8C, 0x1F_83_D9_AB, 0x5B_E0_CD_19, ] # Initialize round constants UpperCAmelCase = [ 0x42_8A_2F_98, 0x71_37_44_91, 0xB5_C0_FB_CF, 0xE9_B5_DB_A5, 0x39_56_C2_5B, 0x59_F1_11_F1, 0x92_3F_82_A4, 0xAB_1C_5E_D5, 0xD8_07_AA_98, 0x12_83_5B_01, 0x24_31_85_BE, 0x55_0C_7D_C3, 0x72_BE_5D_74, 0x80_DE_B1_FE, 0x9B_DC_06_A7, 0xC1_9B_F1_74, 0xE4_9B_69_C1, 0xEF_BE_47_86, 0x0F_C1_9D_C6, 0x24_0C_A1_CC, 0x2D_E9_2C_6F, 0x4A_74_84_AA, 0x5C_B0_A9_DC, 0x76_F9_88_DA, 0x98_3E_51_52, 0xA8_31_C6_6D, 0xB0_03_27_C8, 0xBF_59_7F_C7, 0xC6_E0_0B_F3, 0xD5_A7_91_47, 0x06_CA_63_51, 0x14_29_29_67, 0x27_B7_0A_85, 0x2E_1B_21_38, 0x4D_2C_6D_FC, 0x53_38_0D_13, 0x65_0A_73_54, 0x76_6A_0A_BB, 0x81_C2_C9_2E, 0x92_72_2C_85, 0xA2_BF_E8_A1, 0xA8_1A_66_4B, 0xC2_4B_8B_70, 0xC7_6C_51_A3, 0xD1_92_E8_19, 0xD6_99_06_24, 0xF4_0E_35_85, 0x10_6A_A0_70, 0x19_A4_C1_16, 0x1E_37_6C_08, 0x27_48_77_4C, 0x34_B0_BC_B5, 0x39_1C_0C_B3, 0x4E_D8_AA_4A, 0x5B_9C_CA_4F, 0x68_2E_6F_F3, 0x74_8F_82_EE, 0x78_A5_63_6F, 0x84_C8_78_14, 0x8C_C7_02_08, 0x90_BE_FF_FA, 0xA4_50_6C_EB, 0xBE_F9_A3_F7, 0xC6_71_78_F2, ] UpperCAmelCase = self.preprocessing(self.data ) self.final_hash() @staticmethod def _UpperCamelCase ( A ): UpperCAmelCase = b"""\x80""" + (b"""\x00""" * (63 - (len(A ) + 8) % 64)) UpperCAmelCase = struct.pack(""">Q""" ,(len(A ) * 8) ) return data + padding + big_endian_integer def _UpperCamelCase ( self ): # Convert into blocks of 64 bytes UpperCAmelCase = [ self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers UpperCAmelCase = list(struct.unpack(""">16L""" ,A ) ) # add 48 0-ed integers words += [0] * 48 UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , 
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.hashes for index in range(0 ,64 ): if index > 15: # modify the zero-ed indexes at the end of the array UpperCAmelCase = ( self.ror(words[index - 15] ,7 ) ^ self.ror(words[index - 15] ,18 ) ^ (words[index - 15] >> 3) ) UpperCAmelCase = ( self.ror(words[index - 2] ,17 ) ^ self.ror(words[index - 2] ,19 ) ^ (words[index - 2] >> 10) ) UpperCAmelCase = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_00_00_00_00 # Compression UpperCAmelCase = self.ror(A ,6 ) ^ self.ror(A ,11 ) ^ self.ror(A ,25 ) UpperCAmelCase = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g) UpperCAmelCase = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_00_00_00_00 UpperCAmelCase = self.ror(A ,2 ) ^ self.ror(A ,13 ) ^ self.ror(A ,22 ) UpperCAmelCase = (a & b) ^ (a & c) ^ (b & c) UpperCAmelCase = (sa + maj) % 0x1_00_00_00_00 UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = ( g, f, e, ((d + tempa) % 0x1_00_00_00_00), c, b, a, ((tempa + tempa) % 0x1_00_00_00_00), ) UpperCAmelCase = [a, b, c, d, e, f, g, h] # Modify final values UpperCAmelCase = [ ((element + mutated_hash_values[index]) % 0x1_00_00_00_00) for index, element in enumerate(self.hashes ) ] UpperCAmelCase = """""".join([hex(A )[2:].zfill(8 ) for value in self.hashes] ) def _UpperCamelCase ( self ,A ,A ): return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations) class lowerCamelCase__ ( unittest.TestCase ): def _UpperCamelCase ( self ): import hashlib UpperCAmelCase = bytes("""Test String""" ,"""utf-8""" ) self.assertEqual(SHAaaa(A ).hash ,hashlib.shaaaa(A ).hexdigest() ) def _a ( ): """simple docstring""" import doctest doctest.testmod() UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! 
Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument( """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) UpperCAmelCase = parser.parse_args() UpperCAmelCase = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: UpperCAmelCase = f.read() else: UpperCAmelCase = bytes(_snake_case , """utf-8""" ) print(SHAaaa(_snake_case ).hash ) if __name__ == "__main__": main()
74
0
"""simple docstring""" import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""): _UpperCamelCase = True from torch.cuda.amp import autocast _UpperCamelCase = logging.getLogger(__name__) @dataclass class lowerCamelCase__ : SCREAMING_SNAKE_CASE = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''Whether to log verbose messages or not.'''} , ) SCREAMING_SNAKE_CASE = field( default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} ) SCREAMING_SNAKE_CASE = field( default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} ) SCREAMING_SNAKE_CASE = field( default=0.99_99_95 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} ) def _a ( _snake_case , _snake_case ): """simple docstring""" logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) UpperCAmelCase = logging.WARNING if 
model_args.verbose_logging: UpperCAmelCase = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank ): UpperCAmelCase = logging.INFO logger.setLevel(_snake_case ) @dataclass class lowerCamelCase__ : SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) SCREAMING_SNAKE_CASE = field( default='''train''' , metadata={ '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\'''' } , ) SCREAMING_SNAKE_CASE = field( default='''validation''' , metadata={ '''help''': ( '''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\'''' ) } , ) SCREAMING_SNAKE_CASE = field( default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. 
Defaults to \'file\''''} , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) SCREAMING_SNAKE_CASE = field( default=1 , metadata={ '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split''' } , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) SCREAMING_SNAKE_CASE = field( default=20.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} ) @dataclass class lowerCamelCase__ : SCREAMING_SNAKE_CASE = 42 SCREAMING_SNAKE_CASE = 42 SCREAMING_SNAKE_CASE = '''longest''' SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None def __call__( self ,A ): # reformat list to dict and set to pytorch format UpperCAmelCase = self.feature_extractor.pad( A ,max_length=self.max_length ,padding=self.padding ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors="""pt""" ,) UpperCAmelCase = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1] ) UpperCAmelCase = batch["""input_values"""].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula UpperCAmelCase = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1 ) ).to( torch.long ) UpperCAmelCase = torch.zeros( (batch_size, mask_indices_seq_length) ,dtype=torch.long ,device=batch["""input_values"""].device ) # these two operations makes sure that all values # before the output lengths indices are attended to UpperCAmelCase = 1 UpperCAmelCase = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool() # sample randomly masked indices UpperCAmelCase = _compute_mask_indices( (batch_size, mask_indices_seq_length) ,self.model.config.mask_time_prob ,self.model.config.mask_time_length ,attention_mask=A 
,min_masks=2 ,) return batch class lowerCamelCase__ ( snake_case ): def __init__( self ,*A ,A=1 ,A=0 ,A=1.0 ,**A ): super().__init__(*A ,**A ) UpperCAmelCase = 0 UpperCAmelCase = max_gumbel_temp UpperCAmelCase = min_gumbel_temp UpperCAmelCase = gumbel_temp_decay def _UpperCamelCase ( self ,A ,A ): model.train() UpperCAmelCase = self._prepare_inputs(A ) if self.use_amp: with autocast(): UpperCAmelCase = self.compute_loss(A ,A ) else: UpperCAmelCase = self.compute_loss(A ,A ) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": UpperCAmelCase = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": UpperCAmelCase = loss.sum() / (inputs["""mask_time_indices"""]).sum() else: raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' ) if self.args.gradient_accumulation_steps > 1: UpperCAmelCase = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(A ).backward() elif self.use_apex: with amp.scale_loss(A ,self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(A ) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) ) return loss.detach() def _a ( ): """simple docstring""" UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses() configure_logger(_snake_case , _snake_case ) # Downloading and loading a dataset from the hub. 
UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" UpperCAmelCase = DatasetDict() UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , ) UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , ) else: # make sure only "validation" and "train" keys remain" UpperCAmelCase = DatasetDict() UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split="""validation""" , cache_dir=model_args.cache_dir , ) UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , ) # only normalized-inputs-training is supported UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_snake_case ) def prepare_dataset(_snake_case ): # check that all files have the correct sampling rate UpperCAmelCase , UpperCAmelCase = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate ) return batch # load audio files into numpy arrays UpperCAmelCase = datasets.map( _snake_case , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["""train"""].column_names ) # filter audio files that are too long UpperCAmelCase = vectorized_datasets.filter( lambda _snake_case : len(data["""speech"""] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) ) def normalize(_snake_case ): return feature_extractor(batch["""speech"""] , 
sampling_rate=feature_extractor.sampling_rate ) # normalize and transform to `BatchFeatures` UpperCAmelCase = vectorized_datasets.map( _snake_case , batched=_snake_case , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["""train"""].column_names , ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 UpperCAmelCase = WavaVecaConfig.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( """PreTraining is only supported for ``config.do_stable_layer_norm=True`` and""" """ ``config.feat_extract_norm='layer'""" ) UpperCAmelCase = WavaVecaForPreTraining(_snake_case ) UpperCAmelCase = DataCollatorForWavaVecaPretraining(model=_snake_case , feature_extractor=_snake_case ) UpperCAmelCase = WavaVecaPreTrainer( model=_snake_case , data_collator=_snake_case , args=_snake_case , train_dataset=vectorized_datasets["""train"""] , eval_dataset=vectorized_datasets["""validation"""] , tokenizer=_snake_case , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , ) trainer.train() if __name__ == "__main__": main()
717
"""simple docstring""" def _a ( _snake_case = 10 , _snake_case = 22 ): """simple docstring""" UpperCAmelCase = range(1 , _snake_case ) UpperCAmelCase = range(1 , _snake_case ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(F"""{solution(10, 22) = }""")
74
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _UpperCamelCase = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ["""GPTSw3Tokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_swa import GPTSwaTokenizer else: import sys _UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
718
"""simple docstring""" from __future__ import annotations def _a ( _snake_case ): """simple docstring""" return len(set(_snake_case ) ) == len(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
74
0
"""simple docstring""" from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING _UpperCamelCase = logging.get_logger(__name__) @add_end_docstrings(snake_case ) class lowerCamelCase__ ( snake_case ): def __init__( self ,*A ,**A ): super().__init__(*A ,**A ) requires_backends(self ,"""vision""" ) self.check_model_type(A ) def __call__( self ,A ,**A ): return super().__call__(A ,**A ) def _UpperCamelCase ( self ,**A ): return {}, {}, {} def _UpperCamelCase ( self ,A ): UpperCAmelCase = load_image(A ) UpperCAmelCase = image.size UpperCAmelCase = self.image_processor(images=A ,return_tensors=self.framework ) return model_inputs def _UpperCamelCase ( self ,A ): UpperCAmelCase = self.model(**A ) return model_outputs def _UpperCamelCase ( self ,A ): UpperCAmelCase = model_outputs.predicted_depth UpperCAmelCase = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) ,size=self.image_size[::-1] ,mode="""bicubic""" ,align_corners=A ) UpperCAmelCase = prediction.squeeze().cpu().numpy() UpperCAmelCase = (output * 255 / np.max(A )).astype("""uint8""" ) UpperCAmelCase = Image.fromarray(A ) UpperCAmelCase = {} UpperCAmelCase = predicted_depth UpperCAmelCase = depth return output_dict
719
"""simple docstring""" import math def _a ( _snake_case ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_snake_case ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _a ( _snake_case = 0.1 ): """simple docstring""" UpperCAmelCase = 3 UpperCAmelCase = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ): primes += is_prime(_snake_case ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
74
0
"""simple docstring""" import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class lowerCamelCase__ : def __init__( self ,A ,A ,A ): if dst_width < 0 or dst_height < 0: raise ValueError("""Destination width/height should be > 0""" ) UpperCAmelCase = img UpperCAmelCase = img.shape[1] UpperCAmelCase = img.shape[0] UpperCAmelCase = dst_width UpperCAmelCase = dst_height UpperCAmelCase = self.src_w / self.dst_w UpperCAmelCase = self.src_h / self.dst_h UpperCAmelCase = UpperCAmelCase = ( np.ones((self.dst_h, self.dst_w, 3) ,np.uinta ) * 255 ) def _UpperCamelCase ( self ): for i in range(self.dst_h ): for j in range(self.dst_w ): UpperCAmelCase = self.img[self.get_y(A )][self.get_x(A )] def _UpperCamelCase ( self ,A ): return int(self.ratio_x * x ) def _UpperCamelCase ( self ,A ): return int(self.ratio_y * y ) if __name__ == "__main__": _UpperCamelCase , _UpperCamelCase = 800, 600 _UpperCamelCase = imread("""image_data/lena.jpg""", 1) _UpperCamelCase = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( F"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output ) waitKey(0) destroyAllWindows()
720
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = ['''image_processor''', '''tokenizer'''] SCREAMING_SNAKE_CASE = '''CLIPImageProcessor''' SCREAMING_SNAKE_CASE = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self ,A=None ,A=None ,**A ): UpperCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" ,A ,) UpperCAmelCase = kwargs.pop("""feature_extractor""" ) UpperCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(A ,A ) def __call__( self ,A=None ,A=None ,A=None ,**A ): if text is None and images is None: raise ValueError("""You have to specify either text or images. 
Both cannot be none.""" ) if text is not None: UpperCAmelCase = self.tokenizer(A ,return_tensors=A ,**A ) if images is not None: UpperCAmelCase = self.image_processor(A ,return_tensors=A ,**A ) if text is not None and images is not None: UpperCAmelCase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**A ) ,tensor_type=A ) def _UpperCamelCase ( self ,*A ,**A ): return self.tokenizer.batch_decode(*A ,**A ) def _UpperCamelCase ( self ,*A ,**A ): return self.tokenizer.decode(*A ,**A ) @property def _UpperCamelCase ( self ): UpperCAmelCase = self.tokenizer.model_input_names UpperCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _UpperCamelCase ( self ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,A ,) return self.image_processor_class @property def _UpperCamelCase ( self ): warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,A ,) return self.image_processor
74
0
"""simple docstring""" import os import sys import unittest _UpperCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) _UpperCamelCase = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""") _UpperCamelCase = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""") class lowerCamelCase__ ( unittest.TestCase ): def _UpperCamelCase ( self ): UpperCAmelCase = get_test_to_tester_mapping(A ) UpperCAmelCase = get_test_to_tester_mapping(A ) UpperCAmelCase = {"""BertModelTest""": """BertModelTester"""} UpperCAmelCase = { """BlipModelTest""": """BlipModelTester""", """BlipTextImageModelTest""": """BlipTextImageModelsModelTester""", """BlipTextModelTest""": """BlipTextModelTester""", """BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""", """BlipVQAModelTest""": """BlipVQAModelTester""", """BlipVisionModelTest""": """BlipVisionModelTester""", } self.assertEqual(get_test_info.to_json(A ) ,A ) self.assertEqual(get_test_info.to_json(A ) ,A ) def _UpperCamelCase ( self ): UpperCAmelCase = get_model_to_test_mapping(A ) UpperCAmelCase = get_model_to_test_mapping(A ) UpperCAmelCase = { """BertForMaskedLM""": ["""BertModelTest"""], """BertForMultipleChoice""": ["""BertModelTest"""], """BertForNextSentencePrediction""": ["""BertModelTest"""], """BertForPreTraining""": ["""BertModelTest"""], """BertForQuestionAnswering""": ["""BertModelTest"""], """BertForSequenceClassification""": ["""BertModelTest"""], """BertForTokenClassification""": ["""BertModelTest"""], """BertLMHeadModel""": ["""BertModelTest"""], """BertModel""": ["""BertModelTest"""], } UpperCAmelCase = { """BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""], """BlipForImageTextRetrieval""": 
["""BlipTextRetrievalModelTest"""], """BlipForQuestionAnswering""": ["""BlipVQAModelTest"""], """BlipModel""": ["""BlipModelTest"""], """BlipTextModel""": ["""BlipTextModelTest"""], """BlipVisionModel""": ["""BlipVisionModelTest"""], } self.assertEqual(get_test_info.to_json(A ) ,A ) self.assertEqual(get_test_info.to_json(A ) ,A ) def _UpperCamelCase ( self ): UpperCAmelCase = get_model_to_tester_mapping(A ) UpperCAmelCase = get_model_to_tester_mapping(A ) UpperCAmelCase = { """BertForMaskedLM""": ["""BertModelTester"""], """BertForMultipleChoice""": ["""BertModelTester"""], """BertForNextSentencePrediction""": ["""BertModelTester"""], """BertForPreTraining""": ["""BertModelTester"""], """BertForQuestionAnswering""": ["""BertModelTester"""], """BertForSequenceClassification""": ["""BertModelTester"""], """BertForTokenClassification""": ["""BertModelTester"""], """BertLMHeadModel""": ["""BertModelTester"""], """BertModel""": ["""BertModelTester"""], } UpperCAmelCase = { """BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""], """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""], """BlipForQuestionAnswering""": ["""BlipVQAModelTester"""], """BlipModel""": ["""BlipModelTester"""], """BlipTextModel""": ["""BlipTextModelTester"""], """BlipVisionModel""": ["""BlipVisionModelTester"""], } self.assertEqual(get_test_info.to_json(A ) ,A ) self.assertEqual(get_test_info.to_json(A ) ,A )
721
"""simple docstring""" from __future__ import annotations from collections.abc import Generator import requests from bsa import BeautifulSoup _UpperCamelCase = """https://www.indeed.co.in/jobs?q=mobile+app+development&l=""" def _a ( _snake_case = "mumbai" ): """simple docstring""" UpperCAmelCase = BeautifulSoup(requests.get(url + location ).content , """html.parser""" ) # This attribute finds out all the specifics listed in a job for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ): UpperCAmelCase = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip() UpperCAmelCase = job.find("""span""" , {"""class""": """company"""} ).text.strip() yield job_title, company_name if __name__ == "__main__": for i, job in enumerate(fetch_jobs("""Bangalore"""), 1): print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
74
0
# Conversion script: original SimMIM Swin checkpoint -> HF SwinForMaskedImageModeling.
#
# NOTE(review): this file appears machine-mangled — every function is named
# `_a`, every assignment target was rewritten to the bare name `UpperCAmelCase`
# (destroying config-attribute and state-dict-key assignments), and bodies
# reference original local names (`model_name`, `config`, `val`, `args`, ...)
# that no longer exist.  As written, every function raises NameError when
# called.  Code below is left byte-identical; comments describe the evident
# intent so the original can be restored against the upstream script.
import argparse

import requests
import torch
from PIL import Image

from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor


def _a ( _snake_case ):
    # Evidently get_swin_config(model_name): build a 192px SwinConfig for the
    # "base" / "large" SimMIM variants.  The parameter was renamed to
    # `_snake_case`, so `model_name` below is undefined — TODO restore.
    UpperCAmelCase = SwinConfig(image_size=192 )
    if "base" in model_name:
        # base: window 6, embed 128, depths (2,2,18,2), heads (4,8,16,32)
        UpperCAmelCase = 6
        UpperCAmelCase = 128
        UpperCAmelCase = (2, 2, 18, 2)
        UpperCAmelCase = (4, 8, 16, 32)
    elif "large" in model_name:
        # large: window 12, embed 192, depths (2,2,18,2), heads (6,12,24,48)
        UpperCAmelCase = 12
        UpperCAmelCase = 192
        UpperCAmelCase = (2, 2, 18, 2)
        UpperCAmelCase = (6, 12, 24, 48)
    else:
        raise ValueError("""Model not supported, only supports base and large variants""" )
    # These four presumably targeted config.window_size / embed_dim / depths /
    # num_heads before mangling — TODO confirm against upstream.
    UpperCAmelCase = window_size
    UpperCAmelCase = embed_dim
    UpperCAmelCase = depths
    UpperCAmelCase = num_heads
    return config


def _a ( _snake_case ):
    # Evidently rename_key(name): map one original checkpoint key to the HF
    # naming scheme.  Again `name` is undefined (parameter was renamed).
    if "encoder.mask_token" in name:
        UpperCAmelCase = name.replace("""encoder.mask_token""" , """embeddings.mask_token""" )
    if "encoder.patch_embed.proj" in name:
        UpperCAmelCase = name.replace("""encoder.patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
    if "encoder.patch_embed.norm" in name:
        UpperCAmelCase = name.replace("""encoder.patch_embed.norm""" , """embeddings.norm""" )
    if "attn.proj" in name:
        UpperCAmelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name:
        UpperCAmelCase = name.replace("""attn""" , """attention.self""" )
    if "norm1" in name:
        UpperCAmelCase = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        UpperCAmelCase = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        UpperCAmelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        UpperCAmelCase = name.replace("""mlp.fc2""" , """output.dense""" )
    if name == "encoder.norm.weight":
        UpperCAmelCase = """layernorm.weight"""
    if name == "encoder.norm.bias":
        UpperCAmelCase = """layernorm.bias"""
    if "decoder" in name:
        # decoder weights are kept under their original names
        pass
    else:
        UpperCAmelCase = """swin.""" + name
    return name


def _a ( _snake_case , _snake_case ):
    # Evidently convert_state_dict(orig_state_dict, model): split fused qkv
    # weights/biases into query/key/value thirds and drop attention masks.
    for key in orig_state_dict.copy().keys():
        UpperCAmelCase = orig_state_dict.pop(_snake_case )
        if "attn_mask" in key:
            # attention masks are recomputed by the HF model; discard
            pass
        elif "qkv" in key:
            UpperCAmelCase = key.split(""".""" )
            UpperCAmelCase = int(key_split[2] )
            UpperCAmelCase = int(key_split[4] )
            UpperCAmelCase = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                # rows [:dim] -> query, [dim:2*dim] -> key, [-dim:] -> value
                # NOTE(review): the state-dict-key assignment targets were
                # destroyed by mangling; each line presumably wrote into
                # orig_state_dict under the renamed query/key/value key.
                UpperCAmelCase = val[:dim, :]
                UpperCAmelCase = val[
                    dim : dim * 2, :
                ]
                UpperCAmelCase = val[-dim:, :]
            else:
                UpperCAmelCase = val[
                    :dim
                ]
                UpperCAmelCase = val[
                    dim : dim * 2
                ]
                UpperCAmelCase = val[
                    -dim:
                ]
        else:
            UpperCAmelCase = val
    return orig_state_dict


def _a ( _snake_case , _snake_case , _snake_case , _snake_case ):
    # Evidently convert_swin_checkpoint(model_name, checkpoint_path,
    # pytorch_dump_folder_path, push_to_hub): load, convert, sanity-check on
    # the COCO cats image, then optionally save / push to hub.
    UpperCAmelCase = torch.load(_snake_case , map_location="""cpu""" )["""model"""]
    UpperCAmelCase = get_swin_config(_snake_case )
    UpperCAmelCase = SwinForMaskedImageModeling(_snake_case )
    model.eval()
    UpperCAmelCase = convert_state_dict(_snake_case , _snake_case )
    model.load_state_dict(_snake_case )
    UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    UpperCAmelCase = ViTImageProcessor(size={"""height""": 192, """width""": 192} )
    UpperCAmelCase = Image.open(requests.get(_snake_case , stream=_snake_case ).raw )
    UpperCAmelCase = image_processor(images=_snake_case , return_tensors="""pt""" )
    with torch.no_grad():
        UpperCAmelCase = model(**_snake_case ).logits
    print(outputs.keys() )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(_snake_case )
        print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(_snake_case )
    if push_to_hub:
        print(F'''Pushing model and image processor for {model_name} to hub''' )
        model.push_to_hub(F'''microsoft/{model_name}''' )
        image_processor.push_to_hub(F'''microsoft/{model_name}''' )


if __name__ == "__main__":
    # NOTE(review): the parser is bound to `_UpperCamelCase` but the
    # add_argument calls reference `parser`, and the final call references
    # `args` — both undefined after mangling.
    _UpperCamelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""swin-base-simmim-window6-192""",
        type=str,
        choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""],
        help="""Name of the Swin SimMIM model you'd like to convert.""",
    )
    parser.add_argument(
        """--checkpoint_path""",
        default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""",
        type=str,
        help="""Path to the original PyTorch checkpoint (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    _UpperCamelCase = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
700
"""Tests for transformers.utils.backbone_utils.

NOTE(review): the mangled original defined all three tests under the same name
``_UpperCamelCase`` (so only the last ever ran) and replaced call arguments
with the undefined name ``A``.  Method names and arguments are reconstructed
below from the asserted expected values, which the mangler left intact.
"""
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class lowerCamelCase__(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        """Features/indices are aligned and default to the last stage."""
        stage_names = ["""a""", """b""", """c"""]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["""c"""])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["""a""", """c"""], None, stage_names)
        self.assertEqual(out_features, ["""a""", """c"""])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["""a""", """c"""])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["""a""", """c"""])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        """Each invalid combination of features/indices/stages must raise."""
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["""a""", """b"""], (0, 1), None)
        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("""a""", """b"""), (0, 1), ["""a""", """b"""])
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["""a""", """b"""], (0, 1), ["""a"""])
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["""a""", """b"""])
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["""a"""])
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["""a""", """b"""], (0,), ["""a""", """b""", """c"""])
        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["""a""", """b"""], (0, 2), ["""a""", """b""", """c"""])
        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["""b""", """a"""], (0, 1), ["""a""", """b"""])
        # Check passes with valid inputs
        verify_out_features_out_indices(["""a""", """b""", """d"""], (0, 1, -1), ["""a""", """b""", """c""", """d"""])

    def test_backbone_mixin(self):
        """Setting out_features / out_indices keeps the two views in sync."""
        backbone = BackboneMixin()
        backbone.stage_names = ["""a""", """b""", """c"""]
        backbone._out_features = ["""a""", """c"""]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["""a""", """c"""])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["""a""", """b"""]
        self.assertEqual(backbone.out_features, ["""a""", """b"""])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["""a""", """c"""])
        self.assertEqual(backbone.out_indices, [-3, -1])
74
0
"""simple docstring""" import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} _UpperCamelCase = { """vocab_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } _UpperCamelCase = { """vocab_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } _UpperCamelCase = { """vocab_file""": { """facebook/dpr-reader-single-nq-base""": ( 
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json""" ), }, } _UpperCamelCase = { """facebook/dpr-ctx_encoder-single-nq-base""": 512, """facebook/dpr-ctx_encoder-multiset-base""": 512, } _UpperCamelCase = { """facebook/dpr-question_encoder-single-nq-base""": 512, """facebook/dpr-question_encoder-multiset-base""": 512, } _UpperCamelCase = { """facebook/dpr-reader-single-nq-base""": 512, """facebook/dpr-reader-multiset-base""": 512, } _UpperCamelCase = { """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True}, } _UpperCamelCase = { """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True}, } _UpperCamelCase = { """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True}, } class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE = DPRContextEncoderTokenizer class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES 
SCREAMING_SNAKE_CASE = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE = DPRQuestionEncoderTokenizer _UpperCamelCase = collections.namedtuple( """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""] ) _UpperCamelCase = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""]) _UpperCamelCase = R""" Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). 
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. 
return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. """ @add_start_docstrings(snake_case ) class lowerCamelCase__ : def __call__( self ,A ,A = None ,A = None ,A = False ,A = False ,A = None ,A = None ,A = None ,**A ,): if titles is None and texts is None: return super().__call__( A ,padding=A ,truncation=A ,max_length=A ,return_tensors=A ,return_attention_mask=A ,**A ,) elif titles is None or texts is None: UpperCAmelCase = titles if texts is None else texts return super().__call__( A ,A ,padding=A ,truncation=A ,max_length=A ,return_tensors=A ,return_attention_mask=A ,**A ,) UpperCAmelCase = titles if not isinstance(A ,A ) else [titles] UpperCAmelCase = texts if not isinstance(A ,A ) else [texts] UpperCAmelCase = len(A ) UpperCAmelCase = questions if not isinstance(A ,A ) else [questions] * n_passages assert len(A ) == len( A ), F'''There should be as many titles than texts but got {len(A )} titles and {len(A )} texts.''' UpperCAmelCase = super().__call__(A ,A ,padding=A ,truncation=A )["""input_ids"""] UpperCAmelCase = super().__call__(A ,add_special_tokens=A ,padding=A ,truncation=A )["""input_ids"""] UpperCAmelCase = { """input_ids""": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(A ,A ) ] } if return_attention_mask is not False: UpperCAmelCase = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id 
in input_ids] ) UpperCAmelCase = attention_mask return self.pad(A ,padding=A ,max_length=A ,return_tensors=A ) def _UpperCamelCase ( self ,A ,A ,A = 16 ,A = 64 ,A = 4 ,): UpperCAmelCase = reader_input["""input_ids"""] UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = reader_output[:3] UpperCAmelCase = len(A ) UpperCAmelCase = sorted(range(A ) ,reverse=A ,key=relevance_logits.__getitem__ ) UpperCAmelCase = [] for doc_id in sorted_docs: UpperCAmelCase = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence UpperCAmelCase = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: UpperCAmelCase = sequence_ids.index(self.pad_token_id ) else: UpperCAmelCase = len(A ) UpperCAmelCase = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=A ,top_spans=A ,) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=A ,start_index=A ,end_index=A ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) ) if len(A ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _UpperCamelCase ( self ,A ,A ,A ,A ,): UpperCAmelCase = [] for start_index, start_score in enumerate(A ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) UpperCAmelCase = sorted(A ,key=lambda A : x[1] ,reverse=A ) UpperCAmelCase = [] for (start_index, end_index), score in scores: assert start_index <= end_index, F'''Wrong span indices: [{start_index}:{end_index}]''' UpperCAmelCase = end_index - start_index + 1 assert length <= 
max_answer_length, F'''Span is too long: {length} > {max_answer_length}''' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(A ) == top_spans: break return chosen_span_intervals @add_end_docstrings(snake_case ) class lowerCamelCase__ ( snake_case , snake_case ): SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE = READER_PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE = READER_PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE = ['''input_ids''', '''attention_mask'''] SCREAMING_SNAKE_CASE = DPRReaderTokenizer
701
"""simple docstring""" from __future__ import annotations from typing import Any class lowerCamelCase__ : def __init__( self ,A = 6 ): UpperCAmelCase = None UpperCAmelCase = None self.create_linked_list(A ) def _UpperCamelCase ( self ,A ): UpperCAmelCase = Node() UpperCAmelCase = current_node UpperCAmelCase = current_node UpperCAmelCase = current_node for _ in range(1 ,A ): UpperCAmelCase = Node() UpperCAmelCase = current_node UpperCAmelCase = previous_node UpperCAmelCase = current_node UpperCAmelCase = self.front UpperCAmelCase = previous_node def _UpperCamelCase ( self ): return ( self.front == self.rear and self.front is not None and self.front.data is None ) def _UpperCamelCase ( self ): self.check_can_perform_operation() return self.front.data if self.front else None def _UpperCamelCase ( self ,A ): if self.rear is None: return self.check_is_full() if not self.is_empty(): UpperCAmelCase = self.rear.next if self.rear: UpperCAmelCase = data def _UpperCamelCase ( self ): self.check_can_perform_operation() if self.rear is None or self.front is None: return None if self.front == self.rear: UpperCAmelCase = self.front.data UpperCAmelCase = None return data UpperCAmelCase = self.front UpperCAmelCase = old_front.next UpperCAmelCase = old_front.data UpperCAmelCase = None return data def _UpperCamelCase ( self ): if self.is_empty(): raise Exception("""Empty Queue""" ) def _UpperCamelCase ( self ): if self.rear and self.rear.next == self.front: raise Exception("""Full Queue""" ) class lowerCamelCase__ : def __init__( self ): UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = None if __name__ == "__main__": import doctest doctest.testmod()
74
0
"""Version-bump utility for a transformers-style repository.

NOTE(review): the mangled original defined every function as ``_a`` and every
constant as ``_UpperCamelCase`` while the bodies/``__main__`` block referenced
the real names (``REPLACE_PATTERNS``, ``update_version_in_file``,
``pre_release_work`` ...), so nothing was callable.  Names restored from those
call sites; all runtime strings are preserved byte-for-byte.
"""
import argparse
import os
import re

import packaging.version


# Root of the examples tree walked by update_version_in_examples().
PATH_TO_EXAMPLES = """examples/"""
# pattern-name -> (regex matching the old version line, replacement template).
# "VERSION" in the template is substituted with the new version string.
REPLACE_PATTERNS = {
    """examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
    """init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
    """setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
    """doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
# Files that always carry the canonical version number.
REPLACE_FILES = {
    """init""": """src/transformers/__init__.py""",
    """setup""": """setup.py""",
}
README_FILE = """README.md"""


def update_version_in_file(fname, version, pattern):
    """Rewrite the version string in ``fname`` using the regex named ``pattern``."""
    with open(fname, """r""", encoding="""utf-8""", newline="""\n""") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("""VERSION""", version)
    code = re_pattern.sub(replace, code)
    with open(fname, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update ``check_min_version`` in every example script under PATH_TO_EXAMPLES."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("""research_projects""")
        if "legacy" in directories:
            directories.remove("""legacy""")
        for fname in fnames:
            if fname.endswith(""".py"""):
                update_version_in_file(os.path.join(folder, fname), version, pattern="""examples""")


def global_version_update(version, patch=False):
    """Bump the version everywhere; examples are skipped for patch releases."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Rewrite README model-list links from the 'main' docs to the stable docs."""
    _start_prompt = """🤗 Transformers currently provides the following architectures"""
    _end_prompt = """1. Want to contribute a new model?"""
    with open(README_FILE, """r""", encoding="""utf-8""", newline="""\n""") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("""1."""):
            lines[index] = lines[index].replace(
                """https://huggingface.co/docs/transformers/main/model_doc""",
                """https://huggingface.co/docs/transformers/model_doc""",
            )
        index += 1
    with open(README_FILE, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.writelines(lines)


def get_version():
    """Read the current version out of the package ``__init__``."""
    with open(REPLACE_FILES["""init"""], """r""") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["""init"""][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Interactively pick the release version and apply it across the repo."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = F'''{default_version.major}.{default_version.minor + 1}.0'''
    # Now let's ask nicely if that's the right one.
    version = input(F'''Which version are you releasing? [{default_version}]''')
    if len(version) == 0:
        version = default_version
    print(F'''Updating version to {version}.''')
    global_version_update(version, patch=patch)
    if not patch:
        print("""Cleaning main README, don't forget to run `make fix-copies`.""")
        clean_main_ref_in_model_list()


def post_release_work():
    """Interactively pick the next dev version after a release and apply it."""
    current_version = get_version()
    dev_version = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F'''Which version are we developing now? [{dev_version}]''')
    if len(version) == 0:
        version = dev_version
    print(F'''Updating version to {version}.''')
    global_version_update(version)
    print("""Cleaning main README, don't forget to run `make fix-copies`.""")
    clean_main_ref_in_model_list()


_a = post_release_work  # backward-compatible alias for the mangled public name


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
    parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("""Nothing to do after a patch :-)""")
    else:
        post_release_work()
# Conversion script: original SpeechT5 HiFiGAN vocoder checkpoint -> HF
# SpeechTaHifiGan.
#
# NOTE(review): this file appears machine-mangled — both functions are named
# `_a` (the second shadows the first), assignment targets were rewritten to
# the bare name `UpperCAmelCase` (destroying the writes into the HF model's
# parameters), and the call sites reference `load_weights` /
# `convert_hifigan_checkpoint`, which no longer exist.  Code is left
# byte-identical; comments describe the evident intent.
import argparse

import numpy as np
import torch

from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging

logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger("""transformers.models.speecht5""")


def _a ( _snake_case , _snake_case , _snake_case ):
    # Evidently load_weights(checkpoint, hf_model, config): copy the original
    # generator weights into the (weight-normed) HF vocoder.  Each bare
    # `UpperCAmelCase = checkpoint[...]` presumably wrote into the matching
    # hf_model parameter (input conv, upsample stack, residual blocks,
    # output conv) before mangling — TODO restore targets from upstream.
    hf_model.apply_weight_norm()
    UpperCAmelCase = checkpoint["""input_conv.weight_g"""]
    UpperCAmelCase = checkpoint["""input_conv.weight_v"""]
    UpperCAmelCase = checkpoint["""input_conv.bias"""]
    for i in range(len(config.upsample_rates ) ):
        UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.weight_g''']
        UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.weight_v''']
        UpperCAmelCase = checkpoint[F'''upsamples.{i}.1.bias''']
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
            UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
            UpperCAmelCase = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
            UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
            UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
            UpperCAmelCase = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
    UpperCAmelCase = checkpoint["""output_conv.1.weight_g"""]
    UpperCAmelCase = checkpoint["""output_conv.1.weight_v"""]
    UpperCAmelCase = checkpoint["""output_conv.1.bias"""]
    hf_model.remove_weight_norm()


@torch.no_grad()
def _a ( _snake_case , _snake_case , _snake_case , _snake_case=None , _snake_case=None , ):
    # Evidently convert_hifigan_checkpoint(checkpoint_path, stats_path,
    # pytorch_dump_folder_path, config_path=None, repo_id=None): build config,
    # load weights, attach the mel mean/scale statistics, save, optionally
    # push to hub.  Locals (`config_path`, `orig_checkpoint`, `stats`,
    # `model`, `repo_id`) are undefined after mangling.
    if config_path is not None:
        UpperCAmelCase = SpeechTaHifiGanConfig.from_pretrained(_snake_case )
    else:
        UpperCAmelCase = SpeechTaHifiGanConfig()
    UpperCAmelCase = SpeechTaHifiGan(_snake_case )
    UpperCAmelCase = torch.load(_snake_case )
    load_weights(orig_checkpoint["""model"""]["""generator"""] , _snake_case , _snake_case )
    # stats.npy holds [mean, scale] used to de-normalise input spectrograms;
    # presumably assigned to model.mean / model.scale before mangling.
    UpperCAmelCase = np.load(_snake_case )
    UpperCAmelCase = stats[0].reshape(-1 )
    UpperCAmelCase = stats[1].reshape(-1 )
    UpperCAmelCase = torch.from_numpy(_snake_case ).float()
    UpperCAmelCase = torch.from_numpy(_snake_case ).float()
    model.save_pretrained(_snake_case )
    if repo_id:
        print("""Pushing to the hub...""" )
        model.push_to_hub(_snake_case )


if __name__ == "__main__":
    _UpperCamelCase = argparse.ArgumentParser()
    # NOTE(review): `parser` / `args` below are undefined (mangled from
    # `_UpperCamelCase` bindings above).
    parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
    parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
    )
    _UpperCamelCase = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
74
0
"""Z-function (longest common prefix of s and each suffix) and a pattern
counter built on it.

NOTE(review): the mangled original defined all three functions as ``_a`` while
calling them by their real names (``z_function``, ``go_next``), so every call
raised NameError.  Names restored from those call sites.
"""


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """True while z_result[i] can be extended by one more matching character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def z_function(input_str: str) -> list[int]:
    """For each i > 0, the length of the longest common prefix of the whole
    string and its suffix starting at i (z_result[0] is left as 0)."""
    z_result = [0 for _ in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of ``pattern`` in ``input_str`` via the Z-function
    of the concatenation ``pattern + input_str``."""
    answer = 0
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1
    return answer


_a = find_pattern  # backward-compatible alias for the mangled public name


if __name__ == "__main__":
    import doctest

    doctest.testmod()
703
"""simple docstring""" # Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position _UpperCamelCase = """2.13.1""" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("""3.7"""): raise ImportWarning( """To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition.""" ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( """To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n""" """If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.""" ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # 
isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip _UpperCamelCase = concatenate_datasets _UpperCamelCase = DownloadConfig _UpperCamelCase = DownloadManager _UpperCamelCase = DownloadMode _UpperCamelCase = DownloadConfig _UpperCamelCase = DownloadMode _UpperCamelCase = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
74
0
"""simple docstring""" import fire from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer def _a ( _snake_case , _snake_case , **_snake_case ): """simple docstring""" UpperCAmelCase = AutoConfig.from_pretrained(_snake_case , **_snake_case ) UpperCAmelCase = AutoModelForSeqaSeqLM.from_config(_snake_case ) model.save_pretrained(_snake_case ) AutoTokenizer.from_pretrained(_snake_case ).save_pretrained(_snake_case ) return model if __name__ == "__main__": fire.Fire(save_randomly_initialized_version)
704
"""simple docstring""" def _a ( _snake_case ): """simple docstring""" if not isinstance(_snake_case , _snake_case ): raise ValueError("""Input must be an integer""" ) if input_num <= 0: raise ValueError("""Input must be positive""" ) return sum( divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
74
0
def _a ( _snake_case ): """simple docstring""" if n == 1 or not isinstance(_snake_case , _snake_case ): return 0 elif n == 2: return 1 else: UpperCAmelCase = [0, 1] for i in range(2 , n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def _a ( _snake_case ): """simple docstring""" UpperCAmelCase = 0 UpperCAmelCase = 2 while digits < n: index += 1 UpperCAmelCase = len(str(fibonacci(_snake_case ) ) ) return index def _a ( _snake_case = 1000 ): """simple docstring""" return fibonacci_digits_index(_snake_case ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
705
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version _UpperCamelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") _UpperCamelCase = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) _UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCamelCase__ : SCREAMING_SNAKE_CASE = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''The column name of the images in the files. 
If not set, will try to use \'image\' or \'img\'.'''} , ) SCREAMING_SNAKE_CASE = field(default=snake_case , metadata={'''help''': '''A folder containing the training data.'''} ) SCREAMING_SNAKE_CASE = field(default=snake_case , metadata={'''help''': '''A folder containing the validation data.'''} ) SCREAMING_SNAKE_CASE = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) SCREAMING_SNAKE_CASE = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) SCREAMING_SNAKE_CASE = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def _UpperCamelCase ( self ): UpperCAmelCase = {} if self.train_dir is not None: UpperCAmelCase = self.train_dir if self.validation_dir is not None: UpperCAmelCase = self.validation_dir UpperCAmelCase = data_files if data_files else None @dataclass class lowerCamelCase__ : SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case )} , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) SCREAMING_SNAKE_CASE = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) SCREAMING_SNAKE_CASE = field(default=snake_case , metadata={'''help''': '''Name or path of preprocessor config.'''} ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={ '''help''': ( '''The size (resolution) of each patch. 
If not specified, will use `patch_size` of the configuration.''' ) } , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class lowerCamelCase__ : def __init__( self ,A=192 ,A=32 ,A=4 ,A=0.6 ): UpperCAmelCase = input_size UpperCAmelCase = mask_patch_size UpperCAmelCase = model_patch_size UpperCAmelCase = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError("""Input size must be divisible by mask patch size""" ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError("""Mask patch size must be divisible by model patch size""" ) UpperCAmelCase = self.input_size // self.mask_patch_size UpperCAmelCase = self.mask_patch_size // self.model_patch_size UpperCAmelCase = self.rand_size**2 UpperCAmelCase = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self ): UpperCAmelCase = np.random.permutation(self.token_count )[: self.mask_count] UpperCAmelCase = np.zeros(self.token_count ,dtype=A ) UpperCAmelCase = 1 UpperCAmelCase = mask.reshape((self.rand_size, self.rand_size) ) UpperCAmelCase = mask.repeat(self.scale ,axis=0 ).repeat(self.scale ,axis=1 ) return torch.tensor(mask.flatten() ) def _a ( _snake_case ): """simple docstring""" UpperCAmelCase = torch.stack([example["""pixel_values"""] for example in examples] ) UpperCAmelCase = torch.stack([example["""mask"""] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def _a ( ): """simple docstring""" UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mim""" , _snake_case , _snake_case ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(_snake_case ) transformers.utils.logging.set_verbosity(_snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. 
''' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. UpperCAmelCase = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _snake_case ) and data_args.train_val_split > 0.0: UpperCAmelCase = ds["""train"""].train_test_split(data_args.train_val_split ) UpperCAmelCase = split["""train"""] UpperCAmelCase = split["""test"""] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
UpperCAmelCase = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name_or_path: UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name_or_path , **_snake_case ) elif model_args.model_name_or_path: UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(_snake_case , """decoder_type""" ): UpperCAmelCase = """simmim""" # adapt config UpperCAmelCase = model_args.image_size if model_args.image_size is not None else config.image_size UpperCAmelCase = model_args.patch_size if model_args.patch_size is not None else config.patch_size UpperCAmelCase = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { """image_size""": model_args.image_size, """patch_size""": model_args.patch_size, """encoder_stride""": model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **_snake_case ) elif model_args.model_name_or_path: UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: UpperCAmelCase = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } UpperCAmelCase = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: UpperCAmelCase = 
AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) UpperCAmelCase = AutoModelForMaskedImageModeling.from_config(_snake_case ) if training_args.do_train: UpperCAmelCase = ds["""train"""].column_names else: UpperCAmelCase = ds["""validation"""].column_names if data_args.image_column_name is not None: UpperCAmelCase = data_args.image_column_name elif "image" in column_names: UpperCAmelCase = """image""" elif "img" in column_names: UpperCAmelCase = """img""" else: UpperCAmelCase = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py UpperCAmelCase = Compose( [ Lambda(lambda _snake_case : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator UpperCAmelCase = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(_snake_case ): UpperCAmelCase = [transforms(_snake_case ) for image in examples[image_column_name]] UpperCAmelCase = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: UpperCAmelCase = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms 
ds["train"].set_transform(_snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: UpperCAmelCase = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_snake_case ) # Initialize our trainer UpperCAmelCase = Trainer( model=_snake_case , args=_snake_case , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , ) # Training if training_args.do_train: UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCAmelCase = last_checkpoint UpperCAmelCase = trainer.train(resume_from_checkpoint=_snake_case ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: UpperCAmelCase = trainer.evaluate() trainer.log_metrics("""eval""" , _snake_case ) trainer.save_metrics("""eval""" , _snake_case ) # Write model card and (optionally) push to hub UpperCAmelCase = { """finetuned_from""": model_args.model_name_or_path, """tasks""": """masked-image-modeling""", """dataset""": data_args.dataset_name, """tags""": ["""masked-image-modeling"""], } if training_args.push_to_hub: trainer.push_to_hub(**_snake_case ) else: trainer.create_model_card(**_snake_case ) if __name__ == "__main__": main()
74
0
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _UpperCamelCase = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ """FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """FocalNetForImageClassification""", """FocalNetForMaskedImageModeling""", """FocalNetBackbone""", """FocalNetModel""", """FocalNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
706
"""simple docstring""" import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""): _UpperCamelCase = True from torch.cuda.amp import autocast _UpperCamelCase = logging.getLogger(__name__) @dataclass class lowerCamelCase__ : SCREAMING_SNAKE_CASE = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''Whether to log verbose messages or not.'''} , ) SCREAMING_SNAKE_CASE = field( default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} ) SCREAMING_SNAKE_CASE = field( default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} ) SCREAMING_SNAKE_CASE = field( default=0.99_99_95 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} ) def _a ( _snake_case , _snake_case ): """simple docstring""" logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) UpperCAmelCase = logging.WARNING if 
model_args.verbose_logging: UpperCAmelCase = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank ): UpperCAmelCase = logging.INFO logger.setLevel(_snake_case ) @dataclass class lowerCamelCase__ : SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) SCREAMING_SNAKE_CASE = field( default='''train''' , metadata={ '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\'''' } , ) SCREAMING_SNAKE_CASE = field( default='''validation''' , metadata={ '''help''': ( '''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\'''' ) } , ) SCREAMING_SNAKE_CASE = field( default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. 
Defaults to \'file\''''} , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) SCREAMING_SNAKE_CASE = field( default=1 , metadata={ '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split''' } , ) SCREAMING_SNAKE_CASE = field( default=snake_case , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) SCREAMING_SNAKE_CASE = field( default=20.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} ) @dataclass class lowerCamelCase__ : SCREAMING_SNAKE_CASE = 42 SCREAMING_SNAKE_CASE = 42 SCREAMING_SNAKE_CASE = "longest" SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None def __call__( self ,A ): # reformat list to dict and set to pytorch format UpperCAmelCase = self.feature_extractor.pad( A ,max_length=self.max_length ,padding=self.padding ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors="""pt""" ,) UpperCAmelCase = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1] ) UpperCAmelCase = batch["""input_values"""].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula UpperCAmelCase = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1 ) ).to( torch.long ) UpperCAmelCase = torch.zeros( (batch_size, mask_indices_seq_length) ,dtype=torch.long ,device=batch["""input_values"""].device ) # these two operations makes sure that all values # before the output lengths indices are attended to UpperCAmelCase = 1 UpperCAmelCase = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool() # sample randomly masked indices UpperCAmelCase = _compute_mask_indices( (batch_size, mask_indices_seq_length) ,self.model.config.mask_time_prob ,self.model.config.mask_time_length ,attention_mask=A 
,min_masks=2 ,) return batch class lowerCamelCase__ ( snake_case ): def __init__( self ,*A ,A=1 ,A=0 ,A=1.0 ,**A ): super().__init__(*A ,**A ) UpperCAmelCase = 0 UpperCAmelCase = max_gumbel_temp UpperCAmelCase = min_gumbel_temp UpperCAmelCase = gumbel_temp_decay def _UpperCamelCase ( self ,A ,A ): model.train() UpperCAmelCase = self._prepare_inputs(A ) if self.use_amp: with autocast(): UpperCAmelCase = self.compute_loss(A ,A ) else: UpperCAmelCase = self.compute_loss(A ,A ) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": UpperCAmelCase = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": UpperCAmelCase = loss.sum() / (inputs["""mask_time_indices"""]).sum() else: raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' ) if self.args.gradient_accumulation_steps > 1: UpperCAmelCase = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(A ).backward() elif self.use_apex: with amp.scale_loss(A ,self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(A ) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) ) return loss.detach() def _a ( ): """simple docstring""" UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses() configure_logger(_snake_case , _snake_case ) # Downloading and loading a dataset from the hub. 
UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" UpperCAmelCase = DatasetDict() UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , ) UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , ) else: # make sure only "validation" and "train" keys remain" UpperCAmelCase = DatasetDict() UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split="""validation""" , cache_dir=model_args.cache_dir , ) UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , ) # only normalized-inputs-training is supported UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_snake_case ) def prepare_dataset(_snake_case ): # check that all files have the correct sampling rate UpperCAmelCase , UpperCAmelCase = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate ) return batch # load audio files into numpy arrays UpperCAmelCase = datasets.map( _snake_case , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["""train"""].column_names ) # filter audio files that are too long UpperCAmelCase = vectorized_datasets.filter( lambda _snake_case : len(data["""speech"""] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) ) def normalize(_snake_case ): return feature_extractor(batch["""speech"""] , 
sampling_rate=feature_extractor.sampling_rate ) # normalize and transform to `BatchFeatures` UpperCAmelCase = vectorized_datasets.map( _snake_case , batched=_snake_case , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["""train"""].column_names , ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 UpperCAmelCase = WavaVecaConfig.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( """PreTraining is only supported for ``config.do_stable_layer_norm=True`` and""" """ ``config.feat_extract_norm='layer'""" ) UpperCAmelCase = WavaVecaForPreTraining(_snake_case ) UpperCAmelCase = DataCollatorForWavaVecaPretraining(model=_snake_case , feature_extractor=_snake_case ) UpperCAmelCase = WavaVecaPreTrainer( model=_snake_case , data_collator=_snake_case , args=_snake_case , train_dataset=vectorized_datasets["""train"""] , eval_dataset=vectorized_datasets["""validation"""] , tokenizer=_snake_case , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , ) trainer.train() if __name__ == "__main__": main()
74
0
"""simple docstring""" class lowerCamelCase__ : def __init__( self ,A ): # we need a list not a string, so do something to change the type UpperCAmelCase = arr.split(""",""" ) def _UpperCamelCase ( self ): UpperCAmelCase = [int(self.array[0] )] * len(self.array ) UpperCAmelCase = [int(self.array[0] )] * len(self.array ) for i in range(1 ,len(self.array ) ): UpperCAmelCase = max( int(self.array[i] ) + sum_value[i - 1] ,int(self.array[i] ) ) UpperCAmelCase = max(sum_value[i] ,rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": _UpperCamelCase : Dict = input("""please input some numbers:""") _UpperCamelCase : str = SubArray(whole_array) _UpperCamelCase : List[str] = array.solve_sub_array() print(("""the results is:""", re))
707
"""simple docstring""" from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class lowerCamelCase__ : def __init__( self ,A ,): UpperCAmelCase = parent UpperCAmelCase = 13 UpperCAmelCase = 7 UpperCAmelCase = 30 UpperCAmelCase = self.seq_length + self.mem_len UpperCAmelCase = 15 UpperCAmelCase = True UpperCAmelCase = True UpperCAmelCase = 99 UpperCAmelCase = [10, 50, 80] UpperCAmelCase = 32 UpperCAmelCase = 32 UpperCAmelCase = 4 UpperCAmelCase = 8 UpperCAmelCase = 128 UpperCAmelCase = 2 UpperCAmelCase = 2 UpperCAmelCase = None UpperCAmelCase = 1 UpperCAmelCase = 0 UpperCAmelCase = 3 UpperCAmelCase = self.vocab_size - 1 UpperCAmelCase = 0.01 def _UpperCamelCase ( self ): UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase = None if self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase = TransfoXLConfig( vocab_size=self.vocab_size ,mem_len=self.mem_len ,clamp_len=self.clamp_len ,cutoffs=self.cutoffs ,d_model=self.hidden_size ,d_embed=self.d_embed ,n_head=self.num_attention_heads ,d_head=self.d_head ,d_inner=self.d_inner ,div_val=self.div_val ,n_layer=self.num_hidden_layers ,eos_token_id=self.eos_token_id ,pad_token_id=self.vocab_size - 1 ,init_range=self.init_range ,num_labels=self.num_labels ,) return (config, input_ids_a, input_ids_a, lm_labels) def _UpperCamelCase ( self ): 
random.seed(self.seed ) tf.random.set_seed(self.seed ) def _UpperCamelCase ( self ,A ,A ,A ,A ): UpperCAmelCase = TFTransfoXLModel(A ) UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple() UpperCAmelCase = {"""input_ids""": input_ids_a, """mems""": mems_a} UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple() self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) def _UpperCamelCase ( self ,A ,A ,A ,A ): UpperCAmelCase = TFTransfoXLLMHeadModel(A ) UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple() UpperCAmelCase = {"""input_ids""": input_ids_a, """labels""": lm_labels} UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple() UpperCAmelCase , UpperCAmelCase = model([input_ids_a, mems_a] ).to_tuple() UpperCAmelCase = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels} UpperCAmelCase , UpperCAmelCase = model(A ).to_tuple() self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) def _UpperCamelCase ( self ,A ,A ,A ,A ): UpperCAmelCase = TFTransfoXLForSequenceClassification(A ) UpperCAmelCase = model(A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _UpperCamelCase ( self 
): UpperCAmelCase = self.prepare_config_and_inputs() ((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) = config_and_inputs UpperCAmelCase = {"""input_ids""": input_ids_a} return config, inputs_dict @require_tf class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) SCREAMING_SNAKE_CASE = () if is_tf_available() else () SCREAMING_SNAKE_CASE = ( { '''feature-extraction''': TFTransfoXLModel, '''text-classification''': TFTransfoXLForSequenceClassification, '''text-generation''': TFTransfoXLLMHeadModel, '''zero-shot''': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def _UpperCamelCase ( self ,A ,A ,A ,A ,A ): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def _UpperCamelCase ( self ): UpperCAmelCase = TFTransfoXLModelTester(self ) UpperCAmelCase = ConfigTester(self ,config_class=A ,d_embed=37 ) def _UpperCamelCase ( self ): self.config_tester.run_common_tests() def _UpperCamelCase ( self ): self.model_tester.set_seed() UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*A ) def _UpperCamelCase ( self ): self.model_tester.set_seed() UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*A ) def _UpperCamelCase ( self ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*A ) def _UpperCamelCase ( self ): UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: UpperCAmelCase = model_class(A ) assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: UpperCAmelCase = model.get_output_embeddings() assert isinstance(A ,tf.keras.layers.Layer ) UpperCAmelCase = model.get_bias() assert name is None else: UpperCAmelCase = model.get_output_embeddings() assert x is None UpperCAmelCase = model.get_bias() assert name is None def _UpperCamelCase ( self ): # TODO JP: Make TransfoXL XLA compliant pass @slow def _UpperCamelCase ( self ): for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase = TFTransfoXLModel.from_pretrained(A ) self.assertIsNotNone(A ) @unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" ) def _UpperCamelCase ( self ): pass @require_tf class lowerCamelCase__ ( unittest.TestCase ): @unittest.skip("""Skip test until #12651 is resolved.""" ) @slow def _UpperCamelCase ( self ): UpperCAmelCase = 
TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" ) # fmt: off UpperCAmelCase = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] ,dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . 
<eod> </s> <eos> # fmt: off UpperCAmelCase = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> UpperCAmelCase = model.generate(A ,max_length=200 ,do_sample=A ) self.assertListEqual(output_ids[0].numpy().tolist() ,A )
74
0
"""simple docstring""" import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = False, False, False @dataclass class lowerCamelCase__ : SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = None # Automatically constructed SCREAMING_SNAKE_CASE = '''dict''' SCREAMING_SNAKE_CASE = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) SCREAMING_SNAKE_CASE = field(default='''Audio''' , init=snake_case , repr=snake_case ) def __call__( self ): return self.pa_type def _UpperCamelCase ( self ,A ): try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. 
except ImportError as err: raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err if isinstance(A ,A ): return {"bytes": None, "path": value} elif isinstance(A ,A ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes UpperCAmelCase = BytesIO() sf.write(A ,value["""array"""] ,value["""sampling_rate"""] ,format="""wav""" ) return {"bytes": buffer.getvalue(), "path": None} elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("""pcm""" ): # "PCM" only has raw audio bytes if value.get("""sampling_rate""" ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" ) if value.get("""bytes""" ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) UpperCAmelCase = np.frombuffer(value["""bytes"""] ,dtype=np.intaa ).astype(np.floataa ) / 32_767 else: UpperCAmelCase = np.memmap(value["""path"""] ,dtype="""h""" ,mode="""r""" ).astype(np.floataa ) / 32_767 UpperCAmelCase = BytesIO(bytes() ) sf.write(A ,A ,value["""sampling_rate"""] ,format="""wav""" ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("""path""" )} elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )} else: raise ValueError( F'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def _UpperCamelCase ( self ,A ,A = None ): if not self.decode: raise RuntimeError("""Decoding is disabled for this feature. 
Please use Audio(decode=True) instead.""" ) UpperCAmelCase , UpperCAmelCase = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None) if path is None and file is None: raise ValueError(F'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err UpperCAmelCase = xsplitext(A )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( """Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """ """You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( """Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """ """You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. 
""" ) if file is None: UpperCAmelCase = token_per_repo_id or {} UpperCAmelCase = path.split("""::""" )[-1] try: UpperCAmelCase = string_to_dict(A ,config.HUB_DATASETS_URL )["""repo_id"""] UpperCAmelCase = token_per_repo_id[repo_id] except (ValueError, KeyError): UpperCAmelCase = None with xopen(A ,"""rb""" ,use_auth_token=A ) as f: UpperCAmelCase , UpperCAmelCase = sf.read(A ) else: UpperCAmelCase , UpperCAmelCase = sf.read(A ) UpperCAmelCase = array.T if self.mono: UpperCAmelCase = librosa.to_mono(A ) if self.sampling_rate and self.sampling_rate != sampling_rate: UpperCAmelCase = librosa.resample(A ,orig_sr=A ,target_sr=self.sampling_rate ) UpperCAmelCase = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def _UpperCamelCase ( self ): from .features import Value if self.decode: raise ValueError("""Cannot flatten a decoded Audio feature.""" ) return { "bytes": Value("""binary""" ), "path": Value("""string""" ), } def _UpperCamelCase ( self ,A ): if pa.types.is_string(storage.type ): UpperCAmelCase = pa.array([None] * len(A ) ,type=pa.binary() ) UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, storage] ,["""bytes""", """path"""] ,mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): UpperCAmelCase = pa.array([None] * len(A ) ,type=pa.string() ) UpperCAmelCase = pa.StructArray.from_arrays([storage, path_array] ,["""bytes""", """path"""] ,mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ): UpperCAmelCase = pa.array([Audio().encode_example(A ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("""bytes""" ) >= 0: UpperCAmelCase = storage.field("""bytes""" ) else: UpperCAmelCase = pa.array([None] * len(A ) ,type=pa.binary() ) if storage.type.get_field_index("""path""" ) >= 0: UpperCAmelCase = storage.field("""path""" ) else: UpperCAmelCase = pa.array([None] * len(A ) 
,type=pa.string() ) UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] ,["""bytes""", """path"""] ,mask=storage.is_null() ) return array_cast(A ,self.pa_type ) def _UpperCamelCase ( self ,A ): @no_op_if_value_is_null def path_to_bytes(A ): with xopen(A ,"""rb""" ) as f: UpperCAmelCase = f.read() return bytes_ UpperCAmelCase = pa.array( [ (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None for x in storage.to_pylist() ] ,type=pa.binary() ,) UpperCAmelCase = pa.array( [os.path.basename(A ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] ,type=pa.string() ,) UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] ,["""bytes""", """path"""] ,mask=bytes_array.is_null() ) return array_cast(A ,self.pa_type )
708
"""simple docstring""" from math import sqrt def _a ( _snake_case = 100_0000 ): """simple docstring""" UpperCAmelCase = 0 UpperCAmelCase = 0 UpperCAmelCase = 42 while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(_snake_case , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(F"""{solution() = }""")
74
0
"""simple docstring""" import math def _a ( _snake_case ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_snake_case ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _a ( _snake_case = 0.1 ): """simple docstring""" UpperCAmelCase = 3 UpperCAmelCase = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ): primes += is_prime(_snake_case ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
709
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel _UpperCamelCase = { """text_branch""": """text_model""", """audio_branch""": """audio_model.audio_encoder""", """attn""": """attention.self""", """self.proj""": """output.dense""", """attention.self_mask""": """attn_mask""", """mlp.fc1""": """intermediate.dense""", """mlp.fc2""": """output.dense""", """norm1""": """layernorm_before""", """norm2""": """layernorm_after""", """bn0""": """batch_norm""", } _UpperCamelCase = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""") def _a ( _snake_case , _snake_case=False ): """simple docstring""" UpperCAmelCase , UpperCAmelCase = create_model( """HTSAT-tiny""" , """roberta""" , _snake_case , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=_snake_case , fusion_type="""aff_2d""" if enable_fusion else None , ) return model, model_cfg def _a ( _snake_case ): """simple docstring""" UpperCAmelCase = {} UpperCAmelCase = R""".*sequential.(\d+).*""" UpperCAmelCase = R""".*_projection.(\d+).*""" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: UpperCAmelCase = key.replace(_snake_case , _snake_case ) if re.match(_snake_case , _snake_case ): # replace sequential layers with list UpperCAmelCase = re.match(_snake_case , _snake_case ).group(1 ) UpperCAmelCase = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(_snake_case )//3}.linear.''' ) elif re.match(_snake_case , _snake_case ): UpperCAmelCase = int(re.match(_snake_case , _snake_case ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
UpperCAmelCase = 1 if projecton_layer == 0 else 2 UpperCAmelCase = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' ) if "audio" and "qkv" in key: # split qkv into query key and value UpperCAmelCase = value UpperCAmelCase = mixed_qkv.size(0 ) // 3 UpperCAmelCase = mixed_qkv[:qkv_dim] UpperCAmelCase = mixed_qkv[qkv_dim : qkv_dim * 2] UpperCAmelCase = mixed_qkv[qkv_dim * 2 :] UpperCAmelCase = query_layer UpperCAmelCase = key_layer UpperCAmelCase = value_layer else: UpperCAmelCase = value return model_state_dict def _a ( _snake_case , _snake_case , _snake_case , _snake_case=False ): """simple docstring""" UpperCAmelCase , UpperCAmelCase = init_clap(_snake_case , enable_fusion=_snake_case ) clap_model.eval() UpperCAmelCase = clap_model.state_dict() UpperCAmelCase = rename_state_dict(_snake_case ) UpperCAmelCase = ClapConfig() UpperCAmelCase = enable_fusion UpperCAmelCase = ClapModel(_snake_case ) # ignore the spectrogram embedding layer model.load_state_dict(_snake_case , strict=_snake_case ) model.save_pretrained(_snake_case ) transformers_config.save_pretrained(_snake_case ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""") _UpperCamelCase = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
74
0
"""simple docstring""" import os from collections import deque import torch from torch.utils.data import Dataset class lowerCamelCase__ ( snake_case ): def __init__( self ,A="" ,A="train" ): assert os.path.isdir(A ) UpperCAmelCase = [] UpperCAmelCase = os.listdir(A ) for story_filename in story_filenames_list: if "summary" in story_filename: continue UpperCAmelCase = os.path.join(A ,A ) if not os.path.isfile(A ): continue self.documents.append(A ) def __len__( self ): return len(self.documents ) def __getitem__( self ,A ): UpperCAmelCase = self.documents[idx] UpperCAmelCase = document_path.split("""/""" )[-1] with open(A ,encoding="""utf-8""" ) as source: UpperCAmelCase = source.read() UpperCAmelCase , UpperCAmelCase = process_story(A ) return document_name, story_lines, summary_lines def _a ( _snake_case ): """simple docstring""" UpperCAmelCase = list(filter(lambda _snake_case : len(_snake_case ) != 0 , [line.strip() for line in raw_story.split("""\n""" )] ) ) # for some unknown reason some lines miss a period, add it UpperCAmelCase = [_add_missing_period(_snake_case ) for line in nonempty_lines] # gather article lines UpperCAmelCase = [] UpperCAmelCase = deque(_snake_case ) while True: try: UpperCAmelCase = lines.popleft() if element.startswith("""@highlight""" ): break story_lines.append(_snake_case ) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. return story_lines, [] # gather summary lines UpperCAmelCase = list(filter(lambda _snake_case : not t.startswith("""@highlight""" ) , _snake_case ) ) return story_lines, summary_lines def _a ( _snake_case ): """simple docstring""" UpperCAmelCase = [""".""", """!""", """?""", """...""", """'""", """`""", """\"""", """\u2019""", """\u2019""", """)"""] if line.startswith("""@highlight""" ): return line if line[-1] in END_TOKENS: return line return line + "." 
def _a ( _snake_case , _snake_case , _snake_case ): """simple docstring""" if len(_snake_case ) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(_snake_case )) ) return sequence def _a ( _snake_case , _snake_case ): """simple docstring""" UpperCAmelCase = torch.ones_like(_snake_case ) UpperCAmelCase = sequence == pad_token_id UpperCAmelCase = 0 return mask def _a ( _snake_case , _snake_case , _snake_case ): """simple docstring""" UpperCAmelCase = [tokenizer.encode(_snake_case ) for line in story_lines] UpperCAmelCase = [token for sentence in story_lines_token_ids for token in sentence] UpperCAmelCase = [tokenizer.encode(_snake_case ) for line in summary_lines] UpperCAmelCase = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def _a ( _snake_case , _snake_case ): """simple docstring""" UpperCAmelCase = [] for sequence in batch: UpperCAmelCase = -1 UpperCAmelCase = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2 ) batch_embeddings.append(_snake_case ) return torch.tensor(_snake_case )
710
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. _UpperCamelCase = abspath(join(dirname(dirname(__file__)), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def _a ( _snake_case ): """simple docstring""" from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(_snake_case ) def _a ( _snake_case ): """simple docstring""" from diffusers.utils.testing_utils import pytest_terminal_summary_main UpperCAmelCase = terminalreporter.config.getoption("""--make-reports""" ) if make_reports: pytest_terminal_summary_main(_snake_case , id=_snake_case )
74
0
"""simple docstring""" from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = ['''vqvae'''] def __init__( self ,A ,A ,A ,A ,): super().__init__() self.register_modules(unet=A ,scheduler=A ,mel=A ,vqvae=A ) def _UpperCamelCase ( self ): return 50 if isinstance(self.scheduler ,A ) else 1_000 @torch.no_grad() def __call__( self ,A = 1 ,A = None ,A = None ,A = 0 ,A = 0 ,A = None ,A = None ,A = 0 ,A = 0 ,A = None ,A = 0 ,A = None ,A = None ,A=True ,): UpperCAmelCase = steps or self.get_default_steps() self.scheduler.set_timesteps(A ) UpperCAmelCase = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: UpperCAmelCase = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: UpperCAmelCase = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) ,generator=A ,device=self.device ,) UpperCAmelCase = noise UpperCAmelCase = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(A ,A ) UpperCAmelCase = self.mel.audio_slice_to_image(A ) UpperCAmelCase = np.frombuffer(input_image.tobytes() ,dtype="""uint8""" ).reshape( (input_image.height, input_image.width) ) UpperCAmelCase = (input_image / 255) * 2 - 1 UpperCAmelCase = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device ) if self.vqvae is not None: UpperCAmelCase = self.vqvae.encode(torch.unsqueeze(A ,0 ) ).latent_dist.sample( generator=A )[0] UpperCAmelCase = self.vqvae.config.scaling_factor * input_images if start_step > 0: UpperCAmelCase = 
self.scheduler.add_noise(A ,A ,self.scheduler.timesteps[start_step - 1] ) UpperCAmelCase = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) UpperCAmelCase = int(mask_start_secs * pixels_per_second ) UpperCAmelCase = int(mask_end_secs * pixels_per_second ) UpperCAmelCase = self.scheduler.add_noise(A ,A ,torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet ,A ): UpperCAmelCase = self.unet(A ,A ,A )["""sample"""] else: UpperCAmelCase = self.unet(A ,A )["""sample"""] if isinstance(self.scheduler ,A ): UpperCAmelCase = self.scheduler.step( model_output=A ,timestep=A ,sample=A ,eta=A ,generator=A ,)["""prev_sample"""] else: UpperCAmelCase = self.scheduler.step( model_output=A ,timestep=A ,sample=A ,generator=A ,)["""prev_sample"""] if mask is not None: if mask_start > 0: UpperCAmelCase = mask[:, step, :, :mask_start] if mask_end > 0: UpperCAmelCase = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance UpperCAmelCase = 1 / self.vqvae.config.scaling_factor * images UpperCAmelCase = self.vqvae.decode(A )["""sample"""] UpperCAmelCase = (images / 2 + 0.5).clamp(0 ,1 ) UpperCAmelCase = images.cpu().permute(0 ,2 ,3 ,1 ).numpy() UpperCAmelCase = (images * 255).round().astype("""uint8""" ) UpperCAmelCase = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(A ,mode="""RGB""" ).convert("""L""" ) for _ in images) ) UpperCAmelCase = [self.mel.image_to_audio(A ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(A )[:, np.newaxis, :] ) ,**ImagePipelineOutput(A ) ) @torch.no_grad() def _UpperCamelCase ( self ,A ,A = 50 ): assert isinstance(self.scheduler ,A ) self.scheduler.set_timesteps(A ) UpperCAmelCase = 
np.array( [np.frombuffer(image.tobytes() ,dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] ) UpperCAmelCase = (sample / 255) * 2 - 1 UpperCAmelCase = torch.Tensor(A ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ): UpperCAmelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps UpperCAmelCase = self.scheduler.alphas_cumprod[t] UpperCAmelCase = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) UpperCAmelCase = 1 - alpha_prod_t UpperCAmelCase = self.unet(A ,A )["""sample"""] UpperCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output UpperCAmelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) UpperCAmelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def _UpperCamelCase ( A ,A ,A ): UpperCAmelCase = acos(torch.dot(torch.flatten(A ) ,torch.flatten(A ) ) / torch.norm(A ) / torch.norm(A ) ) return sin((1 - alpha) * theta ) * xa / sin(A ) + sin(alpha * theta ) * xa / sin(A )
711
"""simple docstring""" from __future__ import annotations from collections.abc import MutableSequence class lowerCamelCase__ : def __init__( self ,A ,A ): if len(A ) != degree + 1: raise ValueError( """The number of coefficients should be equal to the degree + 1.""" ) UpperCAmelCase = list(A ) UpperCAmelCase = degree def __add__( self ,A ): if self.degree > polynomial_a.degree: UpperCAmelCase = self.coefficients[:] for i in range(polynomial_a.degree + 1 ): coefficients[i] += polynomial_a.coefficients[i] return Polynomial(self.degree ,A ) else: UpperCAmelCase = polynomial_a.coefficients[:] for i in range(self.degree + 1 ): coefficients[i] += self.coefficients[i] return Polynomial(polynomial_a.degree ,A ) def __sub__( self ,A ): return self + polynomial_a * Polynomial(0 ,[-1] ) def __neg__( self ): return Polynomial(self.degree ,[-c for c in self.coefficients] ) def __mul__( self ,A ): UpperCAmelCase = [0] * (self.degree + polynomial_a.degree + 1) for i in range(self.degree + 1 ): for j in range(polynomial_a.degree + 1 ): coefficients[i + j] += ( self.coefficients[i] * polynomial_a.coefficients[j] ) return Polynomial(self.degree + polynomial_a.degree ,A ) def _UpperCamelCase ( self ,A ): UpperCAmelCase = 0 for i in range(self.degree + 1 ): result += self.coefficients[i] * (substitution**i) return result def __str__( self ): UpperCAmelCase = """""" for i in range(self.degree ,-1 ,-1 ): if self.coefficients[i] == 0: continue elif self.coefficients[i] > 0: if polynomial: polynomial += " + " else: polynomial += " - " if i == 0: polynomial += str(abs(self.coefficients[i] ) ) elif i == 1: polynomial += str(abs(self.coefficients[i] ) ) + "x" else: polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(A ) return polynomial def __repr__( self ): return self.__str__() def _UpperCamelCase ( self ): UpperCAmelCase = [0] * self.degree for i in range(self.degree ): UpperCAmelCase = self.coefficients[i + 1] * (i + 1) return Polynomial(self.degree - 1 ,A ) def _UpperCamelCase 
( self ,A = 0 ): UpperCAmelCase = [0] * (self.degree + 2) UpperCAmelCase = constant for i in range(self.degree + 1 ): UpperCAmelCase = self.coefficients[i] / (i + 1) return Polynomial(self.degree + 1 ,A ) def __eq__( self ,A ): if not isinstance(A ,A ): return False if self.degree != polynomial_a.degree: return False for i in range(self.degree + 1 ): if self.coefficients[i] != polynomial_a.coefficients[i]: return False return True def __ne__( self ,A ): return not self.__eq__(A )
74
0
"""Single-variable dense polynomial arithmetic.

``coefficients[i]`` is the coefficient of ``x**i``; a degree-``n``
polynomial therefore carries exactly ``n + 1`` coefficients.
"""
from __future__ import annotations

from collections.abc import MutableSequence


class lowerCamelCase__:
    """Immutable-style polynomial supporting +, -, *, evaluation,
    differentiation and integration.

    BUG FIX: the previous (anonymized) version declared duplicate ``A``
    parameters (a SyntaxError) and bound the constructor arguments to
    locals instead of ``self.coefficients`` / ``self.degree``, so every
    method crashed. This restores the evident intent.
    """

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        # A degree-n polynomial needs exactly n + 1 coefficients.
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        # Copy so later mutation of the caller's sequence cannot leak in.
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, other: lowerCamelCase__) -> lowerCamelCase__:
        """Coefficient-wise sum; the result has the larger degree."""
        if self.degree > other.degree:
            coefficients = self.coefficients[:]
            for i in range(other.degree + 1):
                coefficients[i] += other.coefficients[i]
            return lowerCamelCase__(self.degree, coefficients)
        coefficients = other.coefficients[:]
        for i in range(self.degree + 1):
            coefficients[i] += self.coefficients[i]
        return lowerCamelCase__(other.degree, coefficients)

    def __sub__(self, other: lowerCamelCase__) -> lowerCamelCase__:
        # a - b == a + (-1) * b, reusing __add__ and __mul__.
        return self + other * lowerCamelCase__(0, [-1])

    def __neg__(self) -> lowerCamelCase__:
        return lowerCamelCase__(self.degree, [-c for c in self.coefficients])

    def __mul__(self, other: lowerCamelCase__) -> lowerCamelCase__:
        """Schoolbook convolution product, O(n*m)."""
        coefficients = [0] * (self.degree + other.degree + 1)
        for i in range(self.degree + 1):
            for j in range(other.degree + 1):
                coefficients[i + j] += self.coefficients[i] * other.coefficients[j]
        return lowerCamelCase__(self.degree + other.degree, coefficients)

    def evaluate(self, substitution: float) -> float:
        """Evaluate the polynomial at ``x = substitution``."""
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        # Highest power first; zero terms skipped; "" for the zero polynomial.
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> lowerCamelCase__:
        """Return d/dx of this polynomial as a new polynomial."""
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return lowerCamelCase__(self.degree - 1, coefficients)

    def _UpperCamelCase(self, constant: float = 0) -> lowerCamelCase__:
        """Return the antiderivative, with integration constant ``constant``.

        NOTE(review): keeps the anonymized name that previously resolved
        for this slot; ``evaluate``/``derivative`` above were formerly
        silently shadowed by it and have been given distinct names.
        """
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return lowerCamelCase__(self.degree + 1, coefficients)

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, lowerCamelCase__):
            return False
        if self.degree != other.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != other.coefficients[i]:
                return False
        return True

    def __ne__(self, other: object) -> bool:
        return not self.__eq__(other)
712
"""TensorFlow Flaubert model tests: a config/input builder, the common
model-test suite, and a slow integration check against a released checkpoint.

NOTE(review): this file was machine-anonymized. Method bodies still refer to
the original local names (``parent``, ``input_ids``, ``config_and_inputs`` ...)
while every parameter was renamed to ``A`` (duplicated ``A`` parameters are a
SyntaxError) and every method to ``_UpperCamelCase`` (later defs shadow
earlier ones). The code is reproduced as-is; restore the real names before use.
"""
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    # TF-only imports are guarded so the module imports cleanly without TF.
    import numpy as np
    import tensorflow as tf

    from transformers import (
        TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        FlaubertConfig,
        TFFlaubertForMultipleChoice,
        TFFlaubertForQuestionAnsweringSimple,
        TFFlaubertForSequenceClassification,
        TFFlaubertForTokenClassification,
        TFFlaubertModel,
        TFFlaubertWithLMHeadModel,
    )


class lowerCamelCase__:
    # Builds tiny Flaubert configs and random inputs for the common test suite.
    def __init__(self, A,):
        UpperCAmelCase = parent
        UpperCAmelCase = 13
        UpperCAmelCase = 7
        UpperCAmelCase = True
        UpperCAmelCase = True
        UpperCAmelCase = True
        UpperCAmelCase = True
        UpperCAmelCase = True
        UpperCAmelCase = False
        UpperCAmelCase = False
        UpperCAmelCase = False
        UpperCAmelCase = 2
        UpperCAmelCase = 99
        UpperCAmelCase = 0
        UpperCAmelCase = 32
        UpperCAmelCase = 2
        UpperCAmelCase = 4
        UpperCAmelCase = 0.1
        UpperCAmelCase = 0.1
        UpperCAmelCase = 512
        UpperCAmelCase = 16
        UpperCAmelCase = 2
        UpperCAmelCase = 0.02
        UpperCAmelCase = 3
        UpperCAmelCase = 4
        UpperCAmelCase = """last"""
        UpperCAmelCase = True
        UpperCAmelCase = None
        UpperCAmelCase = 0

    def _UpperCamelCase(self):
        # Random ids/masks plus optional lengths, langs and label tensors.
        UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.floataa)
        UpperCAmelCase = None
        if self.use_input_lengths:
            UpperCAmelCase = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        UpperCAmelCase = None
        if self.use_token_type_ids:
            UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        UpperCAmelCase = None
        UpperCAmelCase = None
        UpperCAmelCase = None
        if self.use_labels:
            UpperCAmelCase = ids_tensor([self.batch_size], self.type_sequence_label_size)
            UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            UpperCAmelCase = ids_tensor([self.batch_size], 2, dtype=tf.floataa)
            UpperCAmelCase = ids_tensor([self.batch_size], self.num_choices)
        UpperCAmelCase = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def _UpperCamelCase(self, A, A, A, A, A, A, A, A, A,):
        # Base model: checks last_hidden_state shape for dict and list inputs.
        UpperCAmelCase = TFFlaubertModel(config=A)
        UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
        UpperCAmelCase = model(A)
        UpperCAmelCase = [input_ids, input_mask]
        UpperCAmelCase = model(A)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def _UpperCamelCase(self, A, A, A, A, A, A, A, A, A,):
        # LM head: logits over the vocabulary.
        UpperCAmelCase = TFFlaubertWithLMHeadModel(A)
        UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
        UpperCAmelCase = model(A)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def _UpperCamelCase(self, A, A, A, A, A, A, A, A, A,):
        # Extractive QA: per-token start/end logits.
        UpperCAmelCase = TFFlaubertForQuestionAnsweringSimple(A)
        UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths}
        UpperCAmelCase = model(A)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def _UpperCamelCase(self, A, A, A, A, A, A, A, A, A,):
        # Sequence classification head.
        UpperCAmelCase = TFFlaubertForSequenceClassification(A)
        UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths}
        UpperCAmelCase = model(A)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def _UpperCamelCase(self, A, A, A, A, A, A, A, A, A,):
        # Token classification head.
        UpperCAmelCase = self.num_labels
        UpperCAmelCase = TFFlaubertForTokenClassification(config=A)
        UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        UpperCAmelCase = model(A)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def _UpperCamelCase(self, A, A, A, A, A, A, A, A, A,):
        # Multiple choice: inputs are tiled along a num_choices axis.
        UpperCAmelCase = self.num_choices
        UpperCAmelCase = TFFlaubertForMultipleChoice(config=A)
        UpperCAmelCase = tf.tile(tf.expand_dims(A, 1), (1, self.num_choices, 1))
        UpperCAmelCase = tf.tile(tf.expand_dims(A, 1), (1, self.num_choices, 1))
        UpperCAmelCase = tf.tile(tf.expand_dims(A, 1), (1, self.num_choices, 1))
        UpperCAmelCase = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        UpperCAmelCase = model(A)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def _UpperCamelCase(self):
        # Repackage prepare_config_and_inputs() into the dict the common suite expects.
        UpperCAmelCase = self.prepare_config_and_inputs()
        (
            (UpperCAmelCase),
            (UpperCAmelCase),
            (UpperCAmelCase),
            (UpperCAmelCase),
            (UpperCAmelCase),
            (UpperCAmelCase),
            (UpperCAmelCase),
            (UpperCAmelCase),
            (UpperCAmelCase),
        ) = config_and_inputs
        UpperCAmelCase = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """langs""": token_type_ids,
            """lengths""": input_lengths,
        }
        return config, inputs_dict


@require_tf
class lowerCamelCase__(snake_case, snake_case, unittest.TestCase):
    # Common-suite test case: wires the tester above into TFModelTesterMixin.
    SCREAMING_SNAKE_CASE = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    SCREAMING_SNAKE_CASE = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    SCREAMING_SNAKE_CASE = (
        {
            """feature-extraction""": TFFlaubertModel,
            """fill-mask""": TFFlaubertWithLMHeadModel,
            """question-answering""": TFFlaubertForQuestionAnsweringSimple,
            """text-classification""": TFFlaubertForSequenceClassification,
            """token-classification""": TFFlaubertForTokenClassification,
            """zero-shot""": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    SCREAMING_SNAKE_CASE = False
    SCREAMING_SNAKE_CASE = False

    def _UpperCamelCase(self, A, A, A, A, A):
        # Skip predicate for pipeline tests.
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("""Fast""")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _UpperCamelCase(self):
        # setUp: instantiate the model tester and the config tester.
        UpperCAmelCase = TFFlaubertModelTester(self)
        UpperCAmelCase = ConfigTester(self, config_class=A, emb_dim=37)

    def _UpperCamelCase(self):
        self.config_tester.run_common_tests()

    def _UpperCamelCase(self):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*A)

    def _UpperCamelCase(self):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*A)

    def _UpperCamelCase(self):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*A)

    def _UpperCamelCase(self):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*A)

    def _UpperCamelCase(self):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*A)

    def _UpperCamelCase(self):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*A)

    @slow
    def _UpperCamelCase(self):
        # Smoke-test loading the first released checkpoint.
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase = TFFlaubertModel.from_pretrained(A)
            self.assertIsNotNone(A)


@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__(unittest.TestCase):
    # Integration test: exact-value check against a released checkpoint.
    @slow
    def _UpperCamelCase(self):
        UpperCAmelCase = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""")
        UpperCAmelCase = tf.convert_to_tensor(
            [[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]],
            dtype=tf.intaa,
        )  # "J'aime flaubert !"
        UpperCAmelCase = model(A)[0]
        UpperCAmelCase = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, A)
        # compare the actual values for a slice.
        UpperCAmelCase = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.floataa,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
74
0
"""A from-scratch convolutional neural network (one conv layer, average/max
pooling, two fully-connected BP layers) trained with hand-written
backpropagation; persisted via pickle.

NOTE(review): machine-anonymized — every local became ``UpperCAmelCase`` and
every parameter ``A`` (duplicate ``A`` parameters are a SyntaxError), while
bodies still read the original names (``bp_numa``, ``model_dic``, ...). The
code is reproduced as-is for review; restore real names before running.
"""
import pickle

import numpy as np
from matplotlib import pyplot as plt


class lowerCamelCase__:
    def __init__(self, A, A, A, A, A, A=0.2, A=0.2):
        # Network sizes (three BP layer widths), conv kernel spec, pooling
        # size, and the two learning rates (weights / thresholds).
        UpperCAmelCase = bp_numa
        UpperCAmelCase = bp_numa
        UpperCAmelCase = bp_numa
        UpperCAmelCase = conva_get[:2]
        UpperCAmelCase = conva_get[2]
        UpperCAmelCase = size_pa
        UpperCAmelCase = rate_w
        UpperCAmelCase = rate_t
        # Random init in [-0.5, 0.5) for weights, [-1, 1) for thresholds.
        UpperCAmelCase = [
            np.mat(-1 * np.random.rand(self.conva[0], self.conva[0]) + 0.5)
            for i in range(self.conva[1])
        ]
        UpperCAmelCase = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa) + 0.5)
        UpperCAmelCase = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa) + 0.5)
        UpperCAmelCase = -2 * np.random.rand(self.conva[1]) + 1
        UpperCAmelCase = -2 * np.random.rand(self.num_bpa) + 1
        UpperCAmelCase = -2 * np.random.rand(self.num_bpa) + 1

    def _UpperCamelCase(self, A):
        # save model dict with pickle
        UpperCAmelCase = {
            """num_bp1""": self.num_bpa,
            """num_bp2""": self.num_bpa,
            """num_bp3""": self.num_bpa,
            """conv1""": self.conva,
            """step_conv1""": self.step_conva,
            """size_pooling1""": self.size_poolinga,
            """rate_weight""": self.rate_weight,
            """rate_thre""": self.rate_thre,
            """w_conv1""": self.w_conva,
            """wkj""": self.wkj,
            """vji""": self.vji,
            """thre_conv1""": self.thre_conva,
            """thre_bp2""": self.thre_bpa,
            """thre_bp3""": self.thre_bpa,
        }
        with open(A, """wb""") as f:
            pickle.dump(A, A)
        print(F'''Model saved: {save_path}''')

    @classmethod
    def _UpperCamelCase(cls, A):
        # read saved model (alternate constructor rebuilding a CNN instance)
        with open(A, """rb""") as f:
            UpperCAmelCase = pickle.load(A)  # noqa: S301
        UpperCAmelCase = model_dic.get("""conv1""")
        conv_get.append(model_dic.get("""step_conv1"""))
        UpperCAmelCase = model_dic.get("""size_pooling1""")
        UpperCAmelCase = model_dic.get("""num_bp1""")
        UpperCAmelCase = model_dic.get("""num_bp2""")
        UpperCAmelCase = model_dic.get("""num_bp3""")
        UpperCAmelCase = model_dic.get("""rate_weight""")
        UpperCAmelCase = model_dic.get("""rate_thre""")
        # create model instance
        UpperCAmelCase = CNN(A, A, A, A, A, A, A)
        # modify model parameter
        UpperCAmelCase = model_dic.get("""w_conv1""")
        UpperCAmelCase = model_dic.get("""wkj""")
        UpperCAmelCase = model_dic.get("""vji""")
        UpperCAmelCase = model_dic.get("""thre_conv1""")
        UpperCAmelCase = model_dic.get("""thre_bp2""")
        UpperCAmelCase = model_dic.get("""thre_bp3""")
        return conv_ins

    def _UpperCamelCase(self, A):
        # Logistic sigmoid activation.
        return 1 / (1 + np.exp(-1 * x))

    def _UpperCamelCase(self, A):
        # Round predictions to 3 decimals for readable output.
        return round(A, 3)

    def _UpperCamelCase(self, A, A, A, A, A):
        # convolution process
        UpperCAmelCase = convs[0]
        UpperCAmelCase = convs[1]
        UpperCAmelCase = np.shape(A)[0]
        # get the data slice of original image data, data_focus
        UpperCAmelCase = []
        for i_focus in range(0, size_data - size_conv + 1, A):
            for j_focus in range(0, size_data - size_conv + 1, A):
                UpperCAmelCase = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(A)
        # calculate the feature map of every single kernel, and saved as list of matrix
        UpperCAmelCase = []
        UpperCAmelCase = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(A):
            UpperCAmelCase = []
            for i_focus in range(len(A)):
                UpperCAmelCase = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(A))
            UpperCAmelCase = np.asmatrix(A).reshape(A, A)
            data_featuremap.append(A)
        # expanding the data slice to One dimenssion
        UpperCAmelCase = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(A))
        UpperCAmelCase = np.asarray(A)
        return focus_list, data_featuremap

    def _UpperCamelCase(self, A, A, A="average_pool"):
        # pooling process
        UpperCAmelCase = len(featuremaps[0])
        UpperCAmelCase = int(size_map / size_pooling)
        UpperCAmelCase = []
        for i_map in range(len(A)):
            UpperCAmelCase = featuremaps[i_map]
            UpperCAmelCase = []
            for i_focus in range(0, A, A):
                for j_focus in range(0, A, A):
                    UpperCAmelCase = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(A))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(A))
            UpperCAmelCase = np.asmatrix(A).reshape(A, A)
            featuremap_pooled.append(A)
        return featuremap_pooled

    def _UpperCamelCase(self, A):
        # expanding three dimension data to one dimension list
        UpperCAmelCase = []
        for i in range(len(A)):
            UpperCAmelCase = np.shape(data[i])
            UpperCAmelCase = data[i].reshape(1, shapes[0] * shapes[1])
            UpperCAmelCase = data_listed.getA().tolist()[0]
            data_expanded.extend(A)
        UpperCAmelCase = np.asarray(A)
        return data_expanded

    def _UpperCamelCase(self, A):
        # expanding matrix to one dimension list
        UpperCAmelCase = np.asarray(A)
        UpperCAmelCase = np.shape(A)
        UpperCAmelCase = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _UpperCamelCase(self, A, A, A, A, A):
        # Undo pooling: spread each pooled gradient over its window, then
        # multiply by the sigmoid derivative of the conv feature maps.
        UpperCAmelCase = []
        UpperCAmelCase = 0
        for i_map in range(A):
            UpperCAmelCase = np.ones((size_map, size_map))
            for i in range(0, A, A):
                for j in range(0, A, A):
                    UpperCAmelCase = pd_pool[i_pool]
                    UpperCAmelCase = i_pool + 1
            UpperCAmelCase = np.multiply(
                A, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(A)
        return pd_all

    def _UpperCamelCase(self, A, A, A, A, A, A=bool):
        # model traning
        print("""----------------------Start Training-------------------------""")
        print((""" - - Shape: Train_Data """, np.shape(A)))
        print((""" - - Shape: Teach_Data """, np.shape(A)))
        UpperCAmelCase = 0
        UpperCAmelCase = []
        UpperCAmelCase = 10_000  # sentinel MSE so the while loop starts
        while rp < n_repeat and mse >= error_accuracy:
            UpperCAmelCase = 0
            print(F'''-------------Learning Time {rp}--------------''')
            for p in range(len(A)):
                # print('------------Learning Image: %d--------------'%p)
                UpperCAmelCase = np.asmatrix(datas_train[p])
                UpperCAmelCase = np.asarray(datas_teach[p])
                UpperCAmelCase, UpperCAmelCase = self.convolute(
                    A,
                    self.conva,
                    self.w_conva,
                    self.thre_conva,
                    conv_step=self.step_conva,
                )
                UpperCAmelCase = self.pooling(A, self.size_poolinga)
                UpperCAmelCase = np.shape(A)
                UpperCAmelCase = self._expand(A)
                UpperCAmelCase = data_bp_input
                # Forward pass through the two fully-connected layers.
                UpperCAmelCase = np.dot(A, self.vji.T) - self.thre_bpa
                UpperCAmelCase = self.sig(A)
                UpperCAmelCase = np.dot(A, self.wkj.T) - self.thre_bpa
                UpperCAmelCase = self.sig(A)
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                UpperCAmelCase = np.multiply(
                    (data_teach - bp_outa), np.multiply(A, (1 - bp_outa))
                )
                UpperCAmelCase = np.multiply(
                    np.dot(A, self.wkj), np.multiply(A, (1 - bp_outa))
                )
                UpperCAmelCase = np.dot(A, self.vji)
                UpperCAmelCase = pd_i_all / (self.size_poolinga * self.size_poolinga)
                UpperCAmelCase = pd_conva_pooled.T.getA().tolist()
                UpperCAmelCase = self._calculate_gradient_from_pool(
                    A,
                    A,
                    shape_featuremapa[0],
                    shape_featuremapa[1],
                    self.size_poolinga,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1]):
                    UpperCAmelCase = self._expand_mat(pd_conva_all[k_conv])
                    UpperCAmelCase = self.rate_weight * np.dot(A, A)
                    UpperCAmelCase = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0])
                    )
                    UpperCAmelCase = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                UpperCAmelCase = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
                UpperCAmelCase = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                UpperCAmelCase = self.thre_bpa - pd_k_all * self.rate_thre
                UpperCAmelCase = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                UpperCAmelCase = np.sum(abs(data_teach - bp_outa))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            UpperCAmelCase = rp + 1
            UpperCAmelCase = error_count / patterns
            all_mse.append(A)

        def draw_error():
            # Plot the per-epoch MSE against the target accuracy line.
            UpperCAmelCase = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(A, """+-""")
            plt.plot(A, """r--""")
            plt.xlabel("""Learning Times""")
            plt.ylabel("""All_mse""")
            plt.grid(A, alpha=0.5)
            plt.show()

        print("""------------------Training Complished---------------------""")
        print((""" - - Training epoch: """, rp, F''' - - Mse: {mse:.6f}'''))
        if draw_e:
            draw_error()
        return mse

    def _UpperCamelCase(self, A):
        # model predict
        UpperCAmelCase = []
        print("""-------------------Start Testing-------------------------""")
        print((""" - - Shape: Test_Data """, np.shape(A)))
        for p in range(len(A)):
            UpperCAmelCase = np.asmatrix(datas_test[p])
            UpperCAmelCase, UpperCAmelCase = self.convolute(
                A,
                self.conva,
                self.w_conva,
                self.thre_conva,
                conv_step=self.step_conva,
            )
            UpperCAmelCase = self.pooling(A, self.size_poolinga)
            UpperCAmelCase = self._expand(A)
            UpperCAmelCase = data_bp_input
            UpperCAmelCase = bp_outa * self.vji.T - self.thre_bpa
            UpperCAmelCase = self.sig(A)
            UpperCAmelCase = bp_outa * self.wkj.T - self.thre_bpa
            UpperCAmelCase = self.sig(A)
            produce_out.extend(bp_outa.getA().tolist())
        UpperCAmelCase = [list(map(self.do_round, A)) for each in produce_out]
        return np.asarray(A)

    def _UpperCamelCase(self, A):
        # return the data of image after convoluting process so we can check it out
        UpperCAmelCase = np.asmatrix(A)
        UpperCAmelCase, UpperCAmelCase = self.convolute(
            A,
            self.conva,
            self.w_conva,
            self.thre_conva,
            conv_step=self.step_conva,
        )
        UpperCAmelCase = self.pooling(A, self.size_poolinga)
        return data_conveda, data_pooleda


if __name__ == "__main__":
    pass
713
"""Convert an original GLPN depth-estimation checkpoint (.pth) to the
HuggingFace format: rename state-dict keys, split the fused key/value
projection, verify a known output slice, and optionally push to the hub.

NOTE(review): anonymization renamed all four functions to ``_a`` (each
definition shadows the previous one) and left bodies referring to the
original locals (``state_dict``, ``key``, ``args`` ...); restore the real
names (``rename_keys``, ``read_in_k_v``, ``prepare_img``,
``convert_glpn_checkpoint``) before running.
"""
import argparse
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)


def _a(_snake_case):
    """Map original GLPN state-dict keys onto the HF module hierarchy."""
    UpperCAmelCase = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("""module.encoder"""):
            UpperCAmelCase = key.replace("""module.encoder""", """glpn.encoder""")
        if key.startswith("""module.decoder"""):
            UpperCAmelCase = key.replace("""module.decoder""", """decoder.stages""")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            UpperCAmelCase = key[key.find("""patch_embed""") + len("""patch_embed""")]
            UpperCAmelCase = key.replace(F'''patch_embed{idx}''', F'''patch_embeddings.{int(_snake_case)-1}''')
        if "norm" in key:
            UpperCAmelCase = key.replace("""norm""", """layer_norm""")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            UpperCAmelCase = key[key.find("""glpn.encoder.layer_norm""") + len("""glpn.encoder.layer_norm""")]
            UpperCAmelCase = key.replace(F'''layer_norm{idx}''', F'''layer_norm.{int(_snake_case)-1}''')
        if "layer_norm1" in key:
            UpperCAmelCase = key.replace("""layer_norm1""", """layer_norm_1""")
        if "layer_norm2" in key:
            UpperCAmelCase = key.replace("""layer_norm2""", """layer_norm_2""")
        if "block" in key:
            # replace for example block1 by block.0
            UpperCAmelCase = key[key.find("""block""") + len("""block""")]
            UpperCAmelCase = key.replace(F'''block{idx}''', F'''block.{int(_snake_case)-1}''')
        if "attn.q" in key:
            UpperCAmelCase = key.replace("""attn.q""", """attention.self.query""")
        if "attn.proj" in key:
            UpperCAmelCase = key.replace("""attn.proj""", """attention.output.dense""")
        if "attn" in key:
            UpperCAmelCase = key.replace("""attn""", """attention.self""")
        if "fc1" in key:
            UpperCAmelCase = key.replace("""fc1""", """dense1""")
        if "fc2" in key:
            UpperCAmelCase = key.replace("""fc2""", """dense2""")
        if "linear_pred" in key:
            UpperCAmelCase = key.replace("""linear_pred""", """classifier""")
        if "linear_fuse" in key:
            UpperCAmelCase = key.replace("""linear_fuse.conv""", """linear_fuse""")
            UpperCAmelCase = key.replace("""linear_fuse.bn""", """batch_norm""")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            UpperCAmelCase = key[key.find("""linear_c""") + len("""linear_c""")]
            UpperCAmelCase = key.replace(F'''linear_c{idx}''', F'''linear_c.{int(_snake_case)-1}''')
        if "bot_conv" in key:
            UpperCAmelCase = key.replace("""bot_conv""", """0.convolution""")
        if "skip_conv1" in key:
            UpperCAmelCase = key.replace("""skip_conv1""", """1.convolution""")
        if "skip_conv2" in key:
            UpperCAmelCase = key.replace("""skip_conv2""", """2.convolution""")
        if "fusion1" in key:
            UpperCAmelCase = key.replace("""fusion1""", """1.fusion""")
        if "fusion2" in key:
            UpperCAmelCase = key.replace("""fusion2""", """2.fusion""")
        if "fusion3" in key:
            UpperCAmelCase = key.replace("""fusion3""", """3.fusion""")
        if "fusion" in key and "conv" in key:
            UpperCAmelCase = key.replace("""conv""", """convolutional_layer""")
        if key.startswith("""module.last_layer_depth"""):
            UpperCAmelCase = key.replace("""module.last_layer_depth""", """head.head""")
        UpperCAmelCase = value
    return new_state_dict


def _a(_snake_case, _snake_case):
    """Split each fused kv projection into separate key/value weights in place."""
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            UpperCAmelCase = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''')
            UpperCAmelCase = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''')
            # next, add keys and values (in that order) to the state dict
            UpperCAmelCase = kv_weight[
                : config.hidden_sizes[i], :
            ]
            UpperCAmelCase = kv_bias[: config.hidden_sizes[i]]
            UpperCAmelCase = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            UpperCAmelCase = kv_bias[config.hidden_sizes[i] :]


def _a():
    """Download the standard COCO verification image."""
    UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    UpperCAmelCase = Image.open(requests.get(_snake_case, stream=_snake_case).raw)
    return image


@torch.no_grad()
def _a(_snake_case, _snake_case, _snake_case=False, _snake_case=None):
    """Convert, verify and (optionally) upload one GLPN checkpoint."""
    UpperCAmelCase = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    UpperCAmelCase = GLPNImageProcessor()
    # prepare image
    UpperCAmelCase = prepare_img()
    UpperCAmelCase = image_processor(images=_snake_case, return_tensors="""pt""").pixel_values
    logger.info("""Converting model...""")
    # load original state dict
    UpperCAmelCase = torch.load(_snake_case, map_location=torch.device("""cpu"""))
    # rename keys
    UpperCAmelCase = rename_keys(_snake_case)
    # key and value matrices need special treatment
    read_in_k_v(_snake_case, _snake_case)
    # create HuggingFace model and load state dict
    UpperCAmelCase = GLPNForDepthEstimation(_snake_case)
    model.load_state_dict(_snake_case)
    model.eval()
    # forward pass
    UpperCAmelCase = model(_snake_case)
    UpperCAmelCase = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            UpperCAmelCase = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            UpperCAmelCase = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(F'''Unknown model name: {model_name}''')
        UpperCAmelCase = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], _snake_case, atol=1E-4)
        print("""Looks ok!""")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("""Pushing model and image processor to the hub...""")
        model.push_to_hub(
            repo_path_or_name=Path(_snake_case, _snake_case),
            organization="""nielsr""",
            commit_message="""Add model""",
            use_temp_dir=_snake_case,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(_snake_case, _snake_case),
            organization="""nielsr""",
            commit_message="""Add image processor""",
            use_temp_dir=_snake_case,
        )


if __name__ == "__main__":
    _UpperCamelCase = argparse.ArgumentParser()
    parser.add_argument(
        """--checkpoint_path""",
        default=None,
        type=str,
        help="""Path to the original PyTorch checkpoint (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
    )
    parser.add_argument(
        """--model_name""",
        default="""glpn-kitti""",
        type=str,
        help="""Name of the model in case you're pushing to the hub.""",
    )
    _UpperCamelCase = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
74
0
"""Swin Transformer model configuration plus its ONNX export config.

NOTE(review): anonymized — base classes appear as ``snake_case`` (originally
``BackboneConfigMixin, PretrainedConfig`` and ``OnnxConfig``), class attributes
as ``SCREAMING_SNAKE_CASE`` and all ``__init__`` parameters as ``A``
(duplicated ``A`` parameters are a SyntaxError). Reproduced as-is.
"""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


_UpperCamelCase = logging.get_logger(__name__)

_UpperCamelCase = {
    """microsoft/swin-tiny-patch4-window7-224""": (
        """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class lowerCamelCase__(snake_case, snake_case):
    # model_type and the attribute-name aliases used by PretrainedConfig.
    SCREAMING_SNAKE_CASE = '''swin'''
    SCREAMING_SNAKE_CASE = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__(
        self,
        A=224,
        A=4,
        A=3,
        A=96,
        A=[2, 2, 6, 2],
        A=[3, 6, 12, 24],
        A=7,
        A=4.0,
        A=True,
        A=0.0,
        A=0.0,
        A=0.1,
        A="gelu",
        A=False,
        A=0.02,
        A=1e-5,
        A=32,
        A=None,
        A=None,
        **A,
    ):
        super().__init__(**A)
        UpperCAmelCase = image_size
        UpperCAmelCase = patch_size
        UpperCAmelCase = num_channels
        UpperCAmelCase = embed_dim
        UpperCAmelCase = depths
        UpperCAmelCase = len(A)
        UpperCAmelCase = num_heads
        UpperCAmelCase = window_size
        UpperCAmelCase = mlp_ratio
        UpperCAmelCase = qkv_bias
        UpperCAmelCase = hidden_dropout_prob
        UpperCAmelCase = attention_probs_dropout_prob
        UpperCAmelCase = drop_path_rate
        UpperCAmelCase = hidden_act
        UpperCAmelCase = use_absolute_embeddings
        UpperCAmelCase = layer_norm_eps
        UpperCAmelCase = initializer_range
        UpperCAmelCase = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        UpperCAmelCase = int(embed_dim * 2 ** (len(A) - 1))
        UpperCAmelCase = ["""stem"""] + [F'''stage{idx}''' for idx in range(1, len(A) + 1)]
        UpperCAmelCase, UpperCAmelCase = get_aligned_output_features_output_indices(
            out_features=A, out_indices=A, stage_names=self.stage_names
        )


class lowerCamelCase__(snake_case):
    # ONNX export configuration: minimum opset and validation tolerance.
    SCREAMING_SNAKE_CASE = version.parse('''1.11''')

    @property
    def _UpperCamelCase(self):
        # Dynamic-axis spec for the single pixel_values input.
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ]
        )

    @property
    def _UpperCamelCase(self):
        # Absolute tolerance used when validating the exported model.
        return 1e-4
714
"""Find articulation points (cut vertices) of an undirected graph.

Classic DFS low-link method: a non-root vertex ``v`` is an articulation
point when some child subtree cannot reach above ``v``; the root of a DFS
tree is one iff it has more than one tree edge.
"""

__all__ = ["_a"]


def _a(graph):
    """Print and return (sorted ascending) the articulation points of ``graph``.

    ``graph`` maps each vertex ``0 .. n-1`` to its adjacency list.

    BUG FIX: the previous (anonymized) version declared four ``dfs``
    parameters all named ``_snake_case`` (a SyntaxError) and the demo at
    module level called an undefined name; both are repaired here, and the
    list of articulation points is now also returned (previously ``None``),
    which is backward-compatible.
    """
    n = len(graph)
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        # Count tree edges leaving the DFS root: the root is an articulation
        # point iff it has more than one.
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                # Back edge: the subtree can climb at least up to `to`.
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            # Root rule overrides whatever the recursion marked for `i`.
            out_edge_count = dfs(i, i, -1, 0)
            is_art[i] = out_edge_count > 1

    result = [x for x in range(len(is_art)) if is_art[x]]
    for x in result:
        print(x)
    return result


# Adjacency list of graph
_UpperCamelCase = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
_a(_UpperCamelCase)
74
0
"""Package init for the Swin model: declares the lazy import structure so
heavy torch/TF modules are only loaded on first attribute access."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Always-available config symbols; model symbols are added conditionally below.
_UpperCamelCase = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # PyTorch missing: simply omit the torch model entries.
    pass
else:
    _UpperCamelCase = [
        """SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SwinForImageClassification""",
        """SwinForMaskedImageModeling""",
        """SwinModel""",
        """SwinPreTrainedModel""",
        """SwinBackbone""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # TensorFlow missing: omit the TF model entries.
    pass
else:
    _UpperCamelCase = [
        """TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFSwinForImageClassification""",
        """TFSwinForMaskedImageModeling""",
        """TFSwinModel""",
        """TFSwinPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # below resolves them on demand.
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    # NOTE(review): anonymization collapsed the structure dict and the model
    # lists into the single name `_UpperCamelCase`; originally these were
    # `_import_structure` plus per-backend list assignments into it.
    _UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
715
"""simple docstring""" _UpperCamelCase = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ _UpperCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}] _UpperCamelCase = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
74
0
"""DETA model configuration."""
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


# Module-level logger, used when falling back to the default backbone.
# NOTE: the obfuscated original referenced `logger` but never defined it.
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL.
_UpperCamelCase = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class lowerCamelCase__(PretrainedConfig):
    """Configuration class for the DETA model.

    Stores the backbone configuration, transformer encoder/decoder sizes,
    deformable-attention settings, and the Hungarian-matcher / loss weights.
    The original obfuscated version declared every __init__ parameter as `A`
    (a SyntaxError); names are restored from the assignment order.
    """

    model_type = "deta"
    # Generic config attribute -> DETA-specific field (see properties below).
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2_048,
        encoder_layers=6,
        encoder_ffn_dim=2_048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1_024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            # Default to a ResNet backbone exposing its last three stages.
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                # Rebuild the backbone config object from a plain dict.
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Alias required by `attribute_map`."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias required by `attribute_map`."""
        return self.d_model

    def to_dict(self):
        """Serialize this config (and its nested backbone config) to a dict."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
716
"""Pure-Python SHA-256 implementation (FIPS 180-4) with a small CLI.

The obfuscated original collapsed the eight working variables (a..h) and the
two temporaries into a single reused name, never stored `preprocessed_data`,
and referenced the undefined names `SHAaaa`, `main` and `hashlib.shaaaa`.
All of that is repaired here; the algorithm itself is standard SHA-256.
"""
import argparse
import struct
import unittest


class SHAaaa:
    """Compute the SHA-256 digest of `data`; result is in `self.hash` (hex str)."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initial hash values (fractional parts of sqrt of first 8 primes).
        self.hashes = [
            0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
            0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
        ]
        # Round constants (fractional parts of cbrt of first 64 primes).
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad to a multiple of 64 bytes: 0x80, zeros, 64-bit big-endian bit length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        """Run the compression function over every 64-byte block."""
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64] for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers (filled in by the message schedule below)
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (words[index - 16] + s0 + words[index - 7] + s1) % 0x100000000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (h + s1 + ch + self.round_constants[index] + words[index]) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                # Rotate the working variables.
                a, b, c, d, e, f, g, h = (
                    (temp1 + temp2) % 0x100000000,
                    a,
                    b,
                    c,
                    (d + temp1) % 0x100000000,
                    e,
                    f,
                    g,
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit value."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHAaaaHashTest(unittest.TestCase):
    def test_match_hashes(self):
        import hashlib

        msg = bytes("Test String", "utf-8")
        # hashlib is the reference implementation.
        self.assertEqual(SHAaaa(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    """Hash a string or file given on the command line (default test string)."""
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHAaaa(hash_input).hash)


if __name__ == "__main__":
    main()
74
0
"""Accelerate example: GLUE/MRPC fine-tuning with LocalSGD synchronization."""
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD


########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    """Build train/eval dataloaders for GLUE MRPC.

    NOTE: the obfuscated original declared two parameters with the same name
    (a SyntaxError) and referenced an undefined `datasets` variable; both are
    repaired here.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels
    # by the models of the transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train + evaluate BERT on MRPC, synchronizing weights with LocalSGD."""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    """Parse CLI arguments and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
717
"""Project Euler 63: count n-digit positive integers that are also nth powers."""


def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Return how many pairs (base, power) with 1 <= base < max_base and
    1 <= power < max_power satisfy len(str(base**power)) == power.

    The obfuscated original declared both parameters with the same name (a
    SyntaxError) and referenced `solution` from the __main__ guard while the
    function itself was renamed; both are repaired here.
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


# Backward-compatible alias for the previous (obfuscated) public name.
_a = solution

if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
74
0
"""Pure-Python SHA-256 implementation (FIPS 180-4) with a small CLI.

This span is a byte-for-byte duplicate of the earlier SHA-256 file in this
concatenation; it receives the identical repair: the obfuscation collapsed
the working variables (a..h) into one name, never stored `preprocessed_data`,
and referenced the undefined names `SHAaaa`, `main` and `hashlib.shaaaa`.
"""
import argparse
import struct
import unittest


class SHAaaa:
    """Compute the SHA-256 digest of `data`; result is in `self.hash` (hex str)."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initial hash values (fractional parts of sqrt of first 8 primes).
        self.hashes = [
            0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
            0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
        ]
        # Round constants (fractional parts of cbrt of first 64 primes).
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad to a multiple of 64 bytes: 0x80, zeros, 64-bit big-endian bit length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        """Run the compression function over every 64-byte block."""
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64] for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers (filled in by the message schedule below)
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (words[index - 16] + s0 + words[index - 7] + s1) % 0x100000000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (h + s1 + ch + self.round_constants[index] + words[index]) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                # Rotate the working variables.
                a, b, c, d, e, f, g, h = (
                    (temp1 + temp2) % 0x100000000,
                    a,
                    b,
                    c,
                    (d + temp1) % 0x100000000,
                    e,
                    f,
                    g,
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit value."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHAaaaHashTest(unittest.TestCase):
    def test_match_hashes(self):
        import hashlib

        msg = bytes("Test String", "utf-8")
        # hashlib is the reference implementation.
        self.assertEqual(SHAaaa(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    """Hash a string or file given on the command line (default test string)."""
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHAaaa(hash_input).hash)


if __name__ == "__main__":
    main()
718
"""Check whether a sequence contains only distinct elements."""
from __future__ import annotations


def _a(_snake_case):
    """Return True when no element of the input sequence appears twice.

    Equivalent to comparing the sequence's length with the size of the set
    of its elements; elements must therefore be hashable.
    """
    seen = set(_snake_case)
    return len(seen) == len(_snake_case)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
74
0
"""Pipeline tests for the text-classification task."""
import unittest

from transformers import (
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TextClassificationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow

from .test_pipelines_common import ANY


# These 2 model types require different inputs than those of the usual text models.
# NOTE: the obfuscated original referenced `_TO_SKIP` but renamed the constant
# away; the name is restored here.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class lowerCamelCase__(unittest.TestCase):
    """End-to-end tests for `pipeline("text-classification")`."""

    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    # Filter out model types whose inputs differ from the usual text models.
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        # NOTE: the obfuscated original declared three parameters all named `A`
        # (a SyntaxError); the names are restored here.
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
719
"""Project Euler 58: spiral primes.

Find the side length of the square spiral for which the ratio of primes
along both diagonals first falls below a given ratio.
"""
import math


def is_prime(number: int) -> bool:
    """Return True if `number` is prime (6k +/- 1 trial division).

    The obfuscated original defined two functions with the same name so this
    helper was shadowed and the call site raised NameError; the canonical
    name referenced by the caller is restored here.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Return the side length at which the diagonal prime ratio drops below `ratio`.

    Starts from the 3x3 spiral (3 primes on 5 diagonal values) and grows the
    spiral two layers at a time, counting primes among the three new corners
    (the fourth corner is always an odd square, never prime).
    """
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            # BUGFIX: the obfuscated version tested the ratio argument here
            # instead of the corner value `i`.
            primes += is_prime(i)
        j += 2
    return j


# Backward-compatible alias for the previous (obfuscated) public name.
_a = solution

if __name__ == "__main__":
    import doctest

    doctest.testmod()
74
0
"""Convert an original CLAP checkpoint to the transformers Clap format."""
import argparse
import re

import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel

# Old (CLAP) key fragments -> new (transformers) key fragments.
# NOTE: the obfuscated original referenced `KEYS_TO_MODIFY_MAPPING` but the
# constant itself had been renamed away; restored here.
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")


def init_clap(checkpoint_path, enable_fusion=False):
    """Instantiate the original CLAP model (HTSAT-tiny audio + roberta text)."""
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    """Rename original CLAP state-dict keys to the transformers layout."""
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        # NOTE(review): `"audio" and "qkv" in key` is effectively just
        # `"qkv" in key` ("audio" is a truthy literal); kept as in the
        # upstream conversion script to avoid changing which keys are split.
        if "audio" and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    """Load the original checkpoint, rename its weights, and save a ClapModel."""
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    # presumably this mirrors the original flag on the audio tower — TODO confirm
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
720
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( snake_case ): SCREAMING_SNAKE_CASE = ['''image_processor''', '''tokenizer'''] SCREAMING_SNAKE_CASE = '''CLIPImageProcessor''' SCREAMING_SNAKE_CASE = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self ,A=None ,A=None ,**A ): UpperCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" ,A ,) UpperCAmelCase = kwargs.pop("""feature_extractor""" ) UpperCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(A ,A ) def __call__( self ,A=None ,A=None ,A=None ,**A ): if text is None and images is None: raise ValueError("""You have to specify either text or images. 
Both cannot be none.""" ) if text is not None: UpperCAmelCase = self.tokenizer(A ,return_tensors=A ,**A ) if images is not None: UpperCAmelCase = self.image_processor(A ,return_tensors=A ,**A ) if text is not None and images is not None: UpperCAmelCase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**A ) ,tensor_type=A ) def _UpperCamelCase ( self ,*A ,**A ): return self.tokenizer.batch_decode(*A ,**A ) def _UpperCamelCase ( self ,*A ,**A ): return self.tokenizer.decode(*A ,**A ) @property def _UpperCamelCase ( self ): UpperCAmelCase = self.tokenizer.model_input_names UpperCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _UpperCamelCase ( self ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,A ,) return self.image_processor_class @property def _UpperCamelCase ( self ): warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,A ,) return self.image_processor
74
0
"""simple docstring""" def _a ( _snake_case ): """simple docstring""" if edge <= 0 or not isinstance(_snake_case , _snake_case ): raise ValueError("""Length must be a positive.""" ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def _a ( _snake_case ): """simple docstring""" if edge <= 0 or not isinstance(_snake_case , _snake_case ): raise ValueError("""Length must be a positive.""" ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
721
"""simple docstring""" from __future__ import annotations from collections.abc import Generator import requests from bsa import BeautifulSoup _UpperCamelCase = """https://www.indeed.co.in/jobs?q=mobile+app+development&l=""" def _a ( _snake_case = "mumbai" ): """simple docstring""" UpperCAmelCase = BeautifulSoup(requests.get(url + location ).content , """html.parser""" ) # This attribute finds out all the specifics listed in a job for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ): UpperCAmelCase = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip() UpperCAmelCase = job.find("""span""" , {"""class""": """company"""} ).text.strip() yield job_title, company_name if __name__ == "__main__": for i, job in enumerate(fetch_jobs("""Bangalore"""), 1): print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
74
0
import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": __A : int = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: '))) print('Googling.....') __A : str = F"""https://www.google.com/search?q={query}&num=100""" __A : int = requests.get( url, headers={'User-Agent': str(UserAgent().random)}, ) try: __A : str = ( BeautifulSoup(res.text, 'html.parser') .find('div', attrs={'class': 'yuRUbf'}) .find('a') .get('href') ) except AttributeError: __A : Any = parse_qs( BeautifulSoup(res.text, 'html.parser') .find('div', attrs={'class': 'kCrYT'}) .find('a') .get('href') )['url'][0] webbrowser.open(link)
75
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __A : Union[str, Any] = logging.get_logger(__name__) __A : Optional[Any] = { 'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json', } class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): _UpperCamelCase:Union[str, Any] = "deta" _UpperCamelCase:int = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=900 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="sine" , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=300 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.2_5 , **_SCREAMING_SNAKE_CASE , )-> str: if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) lowerCamelCase_ =CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] ) else: if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCamelCase_ =backbone_config.pop("""model_type""" ) lowerCamelCase_ =CONFIG_MAPPING[backbone_model_type] lowerCamelCase_ =config_class.from_dict(_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =backbone_config lowerCamelCase_ =num_queries lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =d_model lowerCamelCase_ =encoder_ffn_dim lowerCamelCase_ =encoder_layers lowerCamelCase_ =encoder_attention_heads lowerCamelCase_ =decoder_ffn_dim lowerCamelCase_ =decoder_layers lowerCamelCase_ =decoder_attention_heads lowerCamelCase_ =dropout lowerCamelCase_ =attention_dropout lowerCamelCase_ =activation_dropout lowerCamelCase_ =activation_function lowerCamelCase_ =init_std lowerCamelCase_ =init_xavier_std lowerCamelCase_ =encoder_layerdrop lowerCamelCase_ =auxiliary_loss lowerCamelCase_ =position_embedding_type # deformable attributes lowerCamelCase_ =num_feature_levels lowerCamelCase_ =encoder_n_points lowerCamelCase_ =decoder_n_points lowerCamelCase_ =two_stage lowerCamelCase_ =two_stage_num_proposals lowerCamelCase_ =with_box_refine lowerCamelCase_ =assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("""If two_stage is True, with_box_refine must be True.""" ) # Hungarian matcher lowerCamelCase_ =class_cost lowerCamelCase_ =bbox_cost lowerCamelCase_ =giou_cost # Loss coefficients lowerCamelCase_ =mask_loss_coefficient lowerCamelCase_ =dice_loss_coefficient lowerCamelCase_ =bbox_loss_coefficient lowerCamelCase_ =giou_loss_coefficient lowerCamelCase_ =eos_coefficient lowerCamelCase_ =focal_alpha super().__init__(is_encoder_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @property def _snake_case ( self )-> int: return self.encoder_attention_heads @property def _snake_case ( self )-> int: return 
self.d_model def _snake_case ( self )-> str: lowerCamelCase_ =copy.deepcopy(self.__dict__ ) lowerCamelCase_ =self.backbone_config.to_dict() lowerCamelCase_ =self.__class__.model_type return output
75
1