Dataset schema (from the viewer header):

- code: string, 86 to 54.5k characters
- code_codestyle: int64, 0 to 371
- style_context: string, 87 to 49.2k characters
- style_context_codestyle: int64, 0 to 349
- label: int64, 0 to 1
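The rows below follow this schema. As a minimal sketch of how such a dataset could be inspected (assuming it is published as a Hugging Face dataset; the dataset path below is a placeholder, not the real identifier):

from datasets import load_dataset  # pip install datasets

# "user/code-style-pairs" is a hypothetical path standing in for this dataset.
ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(sorted(row.keys()))
# ['code', 'code_codestyle', 'label', 'style_context', 'style_context_codestyle']
print(row["label"])  # 0 or 1 per the schema above; the label semantics are not documented here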
code:

import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
code_codestyle: 299
style_context:

from collections.abc import Iterable
from typing import Any


class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root: Node | None = None):
        self.root = root

    def __str__(self):
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value: Any) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value: int) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value: int) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(node.left)  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
style_context_codestyle: 299
label: 1
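A small usage sketch for the tree class in the row above, assuming the cleaned names used in the listing (BinarySearchTree, find_kth_smallest, and so on):

# Build the tree from the demo values and exercise the public API.
t = BinarySearchTree()
t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7)  # insert accepts *values
assert t.search(6) is not None           # 6 is in the tree
assert t.get_min().value == 1 and t.get_max().value == 14
assert t.find_kth_smallest(3, t.root) == 4  # sorted order: 1, 3, 4, 6, 7, ...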
code:

import qiskit


def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    classical_bits = qubits

    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their superposition and have the same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
code_codestyle: 299
style_context:

import argparse

import torch

from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")

MAPPING_SPEECH_ENCODER_PRENET = {
    "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
    "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
    "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
    "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
    "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
    "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
    "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
    "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
    "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
    "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
    "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
    "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
    "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
    "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
    "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
    "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
    "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
    "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
    "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
    "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
    "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
    "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
    "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
    "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
    "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
    "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
    "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
    "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
    "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
    "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
    "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
    "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
    "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
    "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
    "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
    "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
    "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
    "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
    "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
    "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
    "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
    "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
    "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
    "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")


def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key

                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_speecht5_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
style_context_codestyle: 299
label: 1
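A quick check of the entanglement demo in this row's code field: a GHZ-style circuit should only ever measure all-zeros or all-ones. This sketch assumes a pre-1.0 Qiskit install with the Aer provider, matching the qiskit.execute call used above:

# With 3 qubits, only the two fully correlated bitstrings can appear.
counts = quantum_entanglement(3)
assert set(counts) <= {"000", "111"}
assert sum(counts.values()) == 1000  # one count per shot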
code:

import unittest

from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST


class DebertaV2ModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
code_codestyle: 299
style_context:

import argparse

import fairseq
import torch

from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "encoder.layer_norm_for_extract": "layer_norm_for_extract",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "label_embs_concat": "label_embeddings_concat",
    "mask_emb": "masked_spec_embed",
    "spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "label_embeddings_concat",
    "speaker_proj",
    "layer_norm_for_extract",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
style_context_codestyle: 299
label: 1
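The UniSpeechSat script above (like the SpeechT5 script in the previous row) matches fairseq state-dict keys against patterns containing a "*" layer wildcard. A standalone distillation of that matching logic; the function and variable names here are illustrative, not taken from either script:

def matches(pattern: str, name: str) -> bool:
    # "a.*.b" matches any name containing both sides of the wildcard;
    # a trailing ".*" matches any name under that prefix.
    if pattern.endswith(".*"):
        return name.startswith(pattern[:-1])
    if ".*." in pattern:
        prefix, suffix = pattern.split(".*.")
        return prefix in name and suffix in name
    return pattern in name

assert matches("encoder.layers.*.fc1", "encoder.layers.3.fc1.weight")
assert matches("text_encoder_prenet.*", "text_encoder_prenet.encoder_prenet.0.weight")
assert not matches("decoder.layers.*.fc1", "encoder.layers.3.fc1.weight")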
code:

import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 299
style_context:

import itertools
import random
import unittest

import numpy as np

from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 1_024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
style_context_codestyle: 299
label: 1
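The feature-extractor test in this row builds synthetic audio with a floats_list helper. A self-contained sketch mirroring the version in the listing (the fixed seed is an addition for reproducibility, not part of the original):

import random

def floats_list(shape, scale=1.0, rng=random.Random(0)):
    # One list per batch row, shape[1] floats in [0, scale) per row.
    return [[rng.random() * scale for _ in range(shape[1])] for _ in range(shape[0])]

batch = floats_list((2, 4))
assert len(batch) == 2 and len(batch[0]) == 4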
code:

import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_002)

    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #                                  ^ unk: 2 + 1 = 3          unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35_378, 6_661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            0, 3_293, 83, 10, 4_552, 4_989, 7_986, 678, 10, 5_915, 111, 179_459, 124_850, 4, 6_044, 237, 12, 6, 5,
            6, 4, 6_780, 705, 15, 1_388, 44, 378, 10_114, 711, 152, 20, 6, 5, 22_376, 642, 1_221, 15_190, 34_153,
            450, 5_608, 959, 1_119, 57_702, 136, 186, 47, 1_098, 29_367, 47,
            # 4426,  # What fairseq tokenizes from "<unk>": "_<"
            # 3678,  # What fairseq tokenizes from "<unk>": "unk"
            # 2740,  # What fairseq tokenizes from "<unk>": ">"
            3,  # What we tokenize from "<unk>": "<unk>"
            6,  # Residue from the tokenization: an extra sentencepiece underline
            4, 6_044, 237, 6_284, 50_901, 528, 31, 90, 34, 927, 2,
        ]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
299
'''simple docstring''' import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = 0 @slow def lowercase_ ( self : Dict ): '''simple docstring''' for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(_A ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(_A ) , 0 ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 20 ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Tuple = AutoConfig.from_pretrained(_A ) self.assertIsInstance(_A , _A ) # Check that tokenizer_type ≠ model_type UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(_A , config=_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def lowercase_ ( self : str ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_A , '''vocab.txt''' ) ) UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(_A , tokenizer_type='''bert''' , use_fast=_A ) self.assertIsInstance(_A , _A ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_A , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_A , '''merges.txt''' ) ) UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A , tokenizer_type='''gpt2''' , use_fast=_A ) self.assertIsInstance(_A , _A ) @require_tokenizers def lowercase_ ( self : str ): '''simple docstring''' 
with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_A , '''vocab.txt''' ) ) UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(_A , tokenizer_type='''bert''' ) self.assertIsInstance(_A , _A ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_A , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_A , '''merges.txt''' ) ) UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(_A , tokenizer_type='''gpt2''' ) self.assertIsInstance(_A , _A ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' with pytest.raises(_A ): AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' ) @require_tokenizers def lowercase_ ( self : int ): '''simple docstring''' for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: UpperCAmelCase__ : Optional[int] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) if isinstance(_A , _A ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _A ) else: self.assertEqual(tokenizer.do_lower_case , _A ) self.assertEqual(tokenizer.model_max_length , 512 ) @require_tokenizers def lowercase_ ( self : List[str] ): '''simple docstring''' for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( _A , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ): UpperCAmelCase__ : Dict = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TOKENIZER_MAPPING.values() UpperCAmelCase__ : Any = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(_A ) @require_tokenizers def lowercase_ ( self : Optional[int] ): '''simple docstring''' self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=_A ) , _A ) self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , _A ) @require_tokenizers def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : int = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=_A ) UpperCAmelCase__ : Any = '''Hello, world. 
How are you?''' UpperCAmelCase__ : Dict = tokenizer.tokenize(_A ) self.assertEqual('''[UNK]''' , tokens[0] ) UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=_A ) UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize(_A ) self.assertEqual('''[UNK]''' , tokens[0] ) @require_tokenizers def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : List[str] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' ) self.assertEqual(type(_A ) , _A ) self.assertEqual(tokenizer.model_max_length , 512 ) self.assertEqual(tokenizer.vocab_size , 30_000 ) self.assertEqual(tokenizer.unk_token , '''[UNK]''' ) self.assertEqual(tokenizer.padding_side , '''right''' ) self.assertEqual(tokenizer.truncation_side , '''right''' ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 12 ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''ctrl''' ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(_A , _A ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : str = get_tokenizer_config('''bert-base-cased''' ) UpperCAmelCase__ : Optional[int] = config.pop('''_commit_hash''' , _A ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(_A , {'''do_lower_case''': False} ) # This model does not have a tokenizer_config so we get back an empty dict. UpperCAmelCase__ : Tuple = get_tokenizer_config(_A ) self.assertDictEqual(_A , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = get_tokenizer_config(_A ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' ) def lowercase_ ( self : Dict ): '''simple docstring''' try: AutoConfig.register('''custom''' , _A ) AutoTokenizer.register(_A , slow_tokenizer_class=_A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A ): AutoTokenizer.register(_A , slow_tokenizer_class=_A ) UpperCAmelCase__ : Optional[int] = CustomTokenizer.from_pretrained(_A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , _A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def lowercase_ ( self : Any ): '''simple docstring''' try: AutoConfig.register('''custom''' , _A ) # Can register in two steps AutoTokenizer.register(_A , slow_tokenizer_class=_A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(_A , fast_tokenizer_class=_A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( _A , slow_tokenizer_class=_A , fast_tokenizer_class=_A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A ): AutoTokenizer.register(_A , fast_tokenizer_class=_A ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase__ : Any = BertTokenizerFast.from_pretrained(_A ) bert_tokenizer.save_pretrained(_A ) UpperCAmelCase__ : Optional[int] = CustomTokenizerFast.from_pretrained(_A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(_A , use_fast=_A ) self.assertIsInstance(_A , _A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowercase_ ( self : Optional[int] ): '''simple docstring''' with self.assertRaises(_A ): UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(_A ): UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A , use_fast=_A ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) @require_tokenizers def lowercase_ ( self : int ): '''simple docstring''' class lowerCamelCase_ ( __a ): lowerCAmelCase__ = False class lowerCamelCase_ ( __a ): lowerCAmelCase__ = NewTokenizer lowerCAmelCase__ = False try: AutoConfig.register('''custom''' , _A ) AutoTokenizer.register(_A , slow_tokenizer_class=_A ) AutoTokenizer.register(_A , fast_tokenizer_class=_A ) # If remote code is not set, the default is to use local UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) UpperCAmelCase__ : str = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertTrue(tokenizer.special_attribute_present ) UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version UpperCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A , use_fast=_A ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def lowercase_ ( self : Tuple ): '''simple docstring''' with self.assertRaisesRegex( _A , '''bert-base is not a local folder and is not a valid model identifier''' ): UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' ) def lowercase_ ( self : Dict ): '''simple docstring''' with self.assertRaisesRegex( _A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A , revision='''aaaaaa''' ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
299
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { '''facebook/deit-base-distilled-patch16-224''': ( '''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json''' ), # See all DeiT models at https://huggingface.co/models?filter=deit } class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'deit' def __init__( self : List[str] , _A : Optional[Any]=768 , _A : int=12 , _A : Optional[Any]=12 , _A : Optional[Any]=3_072 , _A : int="gelu" , _A : Tuple=0.0 , _A : Optional[int]=0.0 , _A : List[Any]=0.0_2 , _A : int=1e-12 , _A : Optional[int]=224 , _A : List[str]=16 , _A : List[Any]=3 , _A : Optional[int]=True , _A : Dict=16 , **_A : Union[str, Any] , ): '''simple docstring''' super().__init__(**_A ) UpperCAmelCase__ : List[Any] = hidden_size UpperCAmelCase__ : Any = num_hidden_layers UpperCAmelCase__ : str = num_attention_heads UpperCAmelCase__ : Optional[Any] = intermediate_size UpperCAmelCase__ : List[str] = hidden_act UpperCAmelCase__ : str = hidden_dropout_prob UpperCAmelCase__ : Any = attention_probs_dropout_prob UpperCAmelCase__ : List[Any] = initializer_range UpperCAmelCase__ : List[str] = layer_norm_eps UpperCAmelCase__ : List[Any] = image_size UpperCAmelCase__ : int = patch_size UpperCAmelCase__ : str = num_channels UpperCAmelCase__ : List[str] = qkv_bias UpperCAmelCase__ : Union[str, Any] = encoder_stride class lowerCamelCase_ ( __a ): lowerCAmelCase__ = version.parse('1.11' ) @property def lowercase_ ( self : str ): '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowercase_ ( self : Dict ): '''simple docstring''' return 1e-4
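A brief usage sketch for the vision config above. It assumes the real-library name `DeiTConfig` behind the obfuscated class (the registry key 'deit' and the DeiT archive map in the file point to it); the printed defaults mirror the `__init__` arguments shown.

# Sketch: instantiating the DeiT configuration (assumes transformers' DeiTConfig,
# which the obfuscated class above corresponds to).
from transformers import DeiTConfig

config = DeiTConfig()  # defaults shown above: hidden_size=768, num_hidden_layers=12
print(config.model_type)  # "deit"
print(config.image_size, config.patch_size, config.num_channels)  # 224 16 3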
299
'''simple docstring'''


def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    # E(z)^2 from the Friedmann equation
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    hubble = hubble_constant * e_2 ** (1 / 2)
    return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
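A quick sanity check of the function above (a minimal sketch): with the three density parameters summing to one, curvature is zero and E(0)^2 = 1, so H(0) must equal the Hubble constant itself.

# Sanity check: flat cosmology at redshift 0 should return H0 unchanged.
h0 = hubble_parameter(
    hubble_constant=68.3,
    radiation_density=1e-4,
    matter_density=0.3,
    dark_energy=1 - 0.3 - 1e-4,
    redshift=0,
)
assert abs(h0 - 68.3) < 1e-9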
299
1
'''simple docstring'''


def to_uppercase(word: str) -> str:
    # Map ASCII 'a'..'z' to 'A'..'Z' by subtracting 32 from the code point;
    # leave every other character (digits, punctuation, non-ASCII) unchanged.
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
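A brief usage sketch; note the ASCII shift only covers unaccented a-z, so anything outside that range passes through unchanged.

# Usage sketch for to_uppercase above.
print(to_uppercase("hello, World! 123"))  # HELLO, WORLD! 123
print(to_uppercase("café"))  # CAFé  ('é' is outside 'a'..'z' and is left alone)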
299
'''simple docstring''' import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin UpperCamelCase__ = logging.get_logger(__name__) enable_full_determinism() class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = UNetaDModel lowerCAmelCase__ = 'sample' @property def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 4 UpperCAmelCase__ : str = 3 UpperCAmelCase__ : str = (32, 32) UpperCAmelCase__ : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : Tuple = torch.tensor([10] ).to(_A ) return {"sample": noise, "timestep": time_step} @property def lowercase_ ( self : int ): '''simple docstring''' return (3, 32, 32) @property def lowercase_ ( self : Dict ): '''simple docstring''' return (3, 32, 32) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = { '''block_out_channels''': (32, 64), '''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''), '''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''), '''attention_head_dim''': 3, '''out_channels''': 3, '''in_channels''': 3, '''layers_per_block''': 2, '''sample_size''': 32, } UpperCAmelCase__ : Tuple = self.dummy_input return init_dict, inputs_dict class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = UNetaDModel lowerCAmelCase__ = 'sample' @property def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = 4 UpperCAmelCase__ : Dict = 4 UpperCAmelCase__ : List[str] = (32, 32) UpperCAmelCase__ : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : List[Any] = torch.tensor([10] ).to(_A ) return {"sample": noise, "timestep": time_step} @property def lowercase_ ( self : Tuple ): '''simple docstring''' return (4, 32, 32) @property def lowercase_ ( self : List[str] ): '''simple docstring''' return (4, 32, 32) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[str] = { '''sample_size''': 32, '''in_channels''': 4, '''out_channels''': 4, '''layers_per_block''': 2, '''block_out_channels''': (32, 64), '''attention_head_dim''': 32, '''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''), '''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''), } UpperCAmelCase__ : Optional[Any] = self.dummy_input return init_dict, inputs_dict def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(_A ) UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Any = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A ) model.to(_A ) UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' ) def lowercase_ ( 
self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A ) model_accelerate.to(_A ) model_accelerate.eval() UpperCAmelCase__ : Tuple = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) UpperCAmelCase__ : Union[str, Any] = noise.to(_A ) UpperCAmelCase__ : Optional[Any] = torch.tensor([10] * noise.shape[0] ).to(_A ) UpperCAmelCase__ : Any = model_accelerate(_A , _A )['''sample'''] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() UpperCAmelCase__ , UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained( '''fusing/unet-ldm-dummy-update''' , output_loading_info=_A , low_cpu_mem_usage=_A ) model_normal_load.to(_A ) model_normal_load.eval() UpperCAmelCase__ : Optional[int] = model_normal_load(_A , _A )['''sample'''] assert torch_all_close(_A , _A , rtol=1e-3 ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' ) model.eval() model.to(_A ) UpperCAmelCase__ : Union[str, Any] = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) UpperCAmelCase__ : str = noise.to(_A ) UpperCAmelCase__ : str = torch.tensor([10] * noise.shape[0] ).to(_A ) with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample UpperCAmelCase__ : List[Any] = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off UpperCAmelCase__ : Tuple = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] ) # fmt: on self.assertTrue(torch_all_close(_A , _A , rtol=1e-3 ) ) class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = UNetaDModel lowerCAmelCase__ = 'sample' @property def lowercase_ ( self : Any , _A : str=(32, 32) ): '''simple docstring''' UpperCAmelCase__ : Tuple = 4 UpperCAmelCase__ : List[str] = 3 UpperCAmelCase__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : Dict = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_A ) return {"sample": noise, "timestep": time_step} @property def lowercase_ ( self : List[str] ): '''simple docstring''' return (3, 32, 32) @property def lowercase_ ( self : List[Any] ): '''simple docstring''' return (3, 32, 32) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[str] = { '''block_out_channels''': [32, 64, 64, 64], '''in_channels''': 3, '''layers_per_block''': 1, '''out_channels''': 3, '''time_embedding_type''': '''fourier''', '''norm_eps''': 1e-6, '''mid_block_scale_factor''': math.sqrt(2.0 ), '''norm_num_groups''': None, '''down_block_types''': [ '''SkipDownBlock2D''', '''AttnSkipDownBlock2D''', '''SkipDownBlock2D''', '''SkipDownBlock2D''', ], '''up_block_types''': [ '''SkipUpBlock2D''', '''SkipUpBlock2D''', '''AttnSkipUpBlock2D''', '''SkipUpBlock2D''', ], } UpperCAmelCase__ : Tuple = self.dummy_input return init_dict, inputs_dict @slow def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : str = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_A ) self.assertIsNotNone(_A ) 
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(_A ) UpperCAmelCase__ : List[str] = self.dummy_input UpperCAmelCase__ : Dict = floats_tensor((4, 3) + (256, 256) ).to(_A ) UpperCAmelCase__ : Optional[Any] = noise UpperCAmelCase__ : Any = model(**_A ) assert image is not None, "Make sure output is not None" @slow def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' ) model.to(_A ) UpperCAmelCase__ : Optional[Any] = 4 UpperCAmelCase__ : List[str] = 3 UpperCAmelCase__ : Dict = (256, 256) UpperCAmelCase__ : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : Union[str, Any] = torch.tensor(batch_size * [1e-4] ).to(_A ) with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample UpperCAmelCase__ : Any = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off UpperCAmelCase__ : Tuple = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] ) # fmt: on self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' ) model.to(_A ) UpperCAmelCase__ : str = 4 UpperCAmelCase__ : Any = 3 UpperCAmelCase__ : int = (32, 32) UpperCAmelCase__ : Optional[Any] = torch.ones((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : Optional[Any] = torch.tensor(batch_size * [1e-4] ).to(_A ) with torch.no_grad(): UpperCAmelCase__ : int = model(_A , _A ).sample UpperCAmelCase__ : Dict = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off UpperCAmelCase__ : Any = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] ) # fmt: on self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) ) def lowercase_ ( self : Tuple ): '''simple docstring''' pass
299
1
'''simple docstring'''
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]


if __name__ == "__main__":
    print(f"{solution() = }")
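A spot check against known values: the first prime is 2, the 6th prime is 13, and 104743 is the well-known Project Euler #7 answer for the default nth=10001.

# Spot-check the nth-prime solver above.
assert solution(1) == 2
assert solution(6) == 13
assert solution(10_001) == 104_743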
299
'''simple docstring'''
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    # Greedy: sort item indices by value/weight ratio, take whole items while
    # they fit, then a fraction of the first item that does not.
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
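A worked example for the greedy routine above (made-up values and weights): items are taken in decreasing value/weight ratio, and the first item that no longer fits is split.

# Worked example: ratios are [1.0, 2.0, 3.0], so items 2 and 1 are taken whole
# (weights 3 + 2 exhaust capacity 5) and item 0 gets fraction 0.
max_value, fractions = fractional_knapsack(value=[1, 4, 9], weight=[1, 2, 3], capacity=5)
assert max_value == 13 and fractions == [0, 1, 1]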
299
1
'''simple docstring''' import argparse import fairseq import torch from torch import nn from transformers import ( MBartaaTokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } UpperCamelCase__ = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]: for attribute in key.split('''.''' ): UpperCAmelCase__ : Dict = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) if weight_type is not None: UpperCAmelCase__ : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape else: UpperCAmelCase__ : Optional[Any] = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCAmelCase__ : Dict = value elif weight_type == "weight_g": UpperCAmelCase__ : List[str] = value elif weight_type == "weight_v": UpperCAmelCase__ : Optional[Any] = value elif weight_type == "bias": UpperCAmelCase__ : Union[str, Any] = value else: UpperCAmelCase__ : Dict = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]: UpperCAmelCase__ : int = [] UpperCAmelCase__ : Optional[int] = fairseq_model.state_dict() UpperCAmelCase__ : str = hf_model.feature_extractor UpperCAmelCase__ : Optional[int] = hf_model.adapter for name, value in fairseq_dict.items(): UpperCAmelCase__ : Optional[int] = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , ) UpperCAmelCase__ : Any = True elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ): load_adapter(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase__ : Optional[int] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: UpperCAmelCase__ : Any = True if "*" in mapped_key: UpperCAmelCase__ : str = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2] UpperCAmelCase__ : str = mapped_key.replace('''*''' , lowerCAmelCase__ ) if "weight_g" in name: UpperCAmelCase__ : List[str] = '''weight_g''' elif "weight_v" in name: UpperCAmelCase__ : Dict = '''weight_v''' elif "bias" in name: UpperCAmelCase__ : Any = '''bias''' elif "weight" in name: UpperCAmelCase__ : List[str] = '''weight''' else: UpperCAmelCase__ : int = None set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) continue if not is_used: unused_weights.append(lowerCAmelCase__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any: UpperCAmelCase__ : Dict = full_name.split('''conv_layers.''' )[-1] UpperCAmelCase__ : int = name.split('''.''' ) UpperCAmelCase__ : int = int(items[0] ) UpperCAmelCase__ : Dict = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) UpperCAmelCase__ : Union[str, Any] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) UpperCAmelCase__ : Union[str, Any] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) UpperCAmelCase__ : Union[str, Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) UpperCAmelCase__ : str = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]: UpperCAmelCase__ : Any = full_name.split('''adaptor.''' )[-1] UpperCAmelCase__ : Optional[int] = name.split('''.''' ) if items[1].isdigit(): UpperCAmelCase__ : Optional[Any] = int(items[1] ) else: UpperCAmelCase__ : Union[str, Any] = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.""" UpperCAmelCase__ : Any = value logger.info(F"""Adapter proj layer norm bias was initialized from {full_name}.""" ) if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.""" UpperCAmelCase__ : int = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), F"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.""" UpperCAmelCase__ : Optional[Any] = value logger.info(F"""Adapter proj layer bias was initialized from {full_name}.""" ) if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), F"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.""" UpperCAmelCase__ : Any = value logger.info(F"""Adapter proj layer weight was initialized from {full_name}.""" ) elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.""" UpperCAmelCase__ : Optional[int] = value logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""" ) elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.""" UpperCAmelCase__ : int = value logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""" ) else: unused_weights.append(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> Tuple: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = emb.weight.shape UpperCAmelCase__ : Any = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__ ) UpperCAmelCase__ : Any = emb.weight.data return lin_layer @torch.no_grad() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> List[Any]: UpperCAmelCase__ : Optional[int] = WavaVecaConfig.from_pretrained( lowerCAmelCase__ , add_adapter=lowerCAmelCase__ , adapter_stride=lowerCAmelCase__ , 
adapter_kernel_size=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , output_hidden_size=lowerCAmelCase__ , ) UpperCAmelCase__ : Optional[int] = MBartConfig.from_pretrained(lowerCAmelCase__ ) # load model UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={ '''config_yaml''': config_yaml_path, '''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path, '''load_pretrained_decoder_from''': None, } , ) UpperCAmelCase__ : List[str] = model[0].eval() # load feature extractor UpperCAmelCase__ : str = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ ) # set weights for wav2vec2 encoder UpperCAmelCase__ : Union[str, Any] = WavaVecaModel(lowerCAmelCase__ ) recursively_load_weights_wavaveca(model.encoder , lowerCAmelCase__ ) # load decoder weights UpperCAmelCase__ : Tuple = MBartForCausalLM(lowerCAmelCase__ ) UpperCAmelCase__ , UpperCAmelCase__ : List[str] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=lowerCAmelCase__ ) logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" ) logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" ) UpperCAmelCase__ : Dict = SpeechEncoderDecoderModel(encoder=lowerCAmelCase__ , decoder=lowerCAmelCase__ ) UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : str = MBartaaTokenizer(lowerCAmelCase__ ) tokenizer.save_pretrained(lowerCAmelCase__ ) UpperCAmelCase__ : List[Any] = hf_wavavec.config.to_dict() UpperCAmelCase__ : int = tokenizer.pad_token_id UpperCAmelCase__ : Optional[int] = tokenizer.bos_token_id UpperCAmelCase__ : List[Any] = tokenizer.eos_token_id UpperCAmelCase__ : List[str] = '''mbart50''' UpperCAmelCase__ : List[str] = '''wav2vec2''' UpperCAmelCase__ : Optional[int] = tokenizer.eos_token_id UpperCAmelCase__ : Union[str, Any] = 25_00_04 UpperCAmelCase__ : Dict = tokenizer.eos_token_id UpperCAmelCase__ : List[Any] = SpeechEncoderDecoderConfig.from_dict(lowerCAmelCase__ ) hf_wavavec.save_pretrained(lowerCAmelCase__ ) feature_extractor.save_pretrained(lowerCAmelCase__ ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''') parser.add_argument( '''--encoder_config_path''', default='''facebook/wav2vec2-xls-r-1b''', type=str, help='''Path to hf encoder wav2vec2 checkpoint config''', ) parser.add_argument( '''--decoder_config_path''', default='''facebook/mbart-large-50-one-to-many-mmt''', type=str, help='''Path to hf decoder checkpoint config''', ) parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''') parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''') parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''') parser.add_argument('''--encoder_output_dim''', default=1_0_2_4, type=int, help='''encoder output 
dim''') parser.add_argument('''--start_token_id''', default=2_5_0_0_0_4, type=int, help='''`decoder_start_token_id` of model config''') UpperCamelCase__ = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
299
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : int , *_A : Tuple , **_A : Union[str, Any] ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : Union[str, Any] , *_A : List[Any] , **_A : Any ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : int , *_A : Union[str, Any] , **_A : Optional[Any] ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : Any , *_A : List[str] , **_A : Tuple ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : Tuple , *_A : Tuple , **_A : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : List[Any] , *_A : List[str] , **_A : Dict ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : Dict , *_A : Any , **_A : int ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : List[Any] , *_A : List[Any] , **_A : Optional[int] ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : int , *_A : Dict , **_A : Optional[Any] ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *_A : Optional[int] , **_A : Optional[Any] ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : Dict , *_A : Any , **_A : Tuple ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : int , *_A : Union[str, Any] , **_A : Dict ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *_A : Optional[int] , **_A : Dict ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : Dict , *_A : str , **_A : Dict ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : Union[str, Any] , *_A : Optional[int] , **_A : int ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : Optional[Any] , *_A : Union[str, Any] , **_A : Dict ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : List[str] , *_A : str , **_A : List[str] ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', 
'''onnx'''] ) @classmethod def lowercase_ ( cls : Dict , *_A : str , **_A : Any ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
299
1
'''simple docstring'''
import copy
import inspect
import unittest

from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import SwiftFormerForImageClassification, SwiftFormerModel
    from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class lowerCamelCase_ :
    def __init__( self : Tuple , _A : Tuple , _A : Any=13 , _A : int=3 , _A : int=True , _A : Union[str, Any]=True , _A : Optional[Any]=0.1 , _A : List[Any]=0.1 , _A : List[Any]=224 , _A : int=1_000 , _A : Any=[3, 3, 6, 4] , _A : str=[48, 56, 112, 220] , ):
        '''simple docstring'''
        UpperCAmelCase__ : int = parent
        UpperCAmelCase__ : Optional[Any] = batch_size
        UpperCAmelCase__ : Any = num_channels
        UpperCAmelCase__ : Dict = is_training
        UpperCAmelCase__ : Any = use_labels
        UpperCAmelCase__ : Any = hidden_dropout_prob
        UpperCAmelCase__ : int = attention_probs_dropout_prob
        UpperCAmelCase__ : List[str] = num_labels
        UpperCAmelCase__ : Any = image_size
        UpperCAmelCase__ : Optional[Any] = layer_depths
        UpperCAmelCase__ : List[str] = embed_dims

    def lowercase_ ( self : Any ):
        '''simple docstring'''
        UpperCAmelCase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        UpperCAmelCase__ : str = None
        if self.use_labels:
            UpperCAmelCase__ : str = ids_tensor([self.batch_size] , self.num_labels )

        UpperCAmelCase__ : Optional[Any] = self.get_config()
        return config, pixel_values, labels

    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        return SwiftFormerConfig(
            depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_A , layer_scale_init_value=1e-5 , )

    def lowercase_ ( self : List[Any] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[Any] ):
        '''simple docstring'''
        UpperCAmelCase__ : Any = SwiftFormerModel(config=_A )
        model.to(_A )
        model.eval()
        UpperCAmelCase__ : List[str] = model(_A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )

    def lowercase_ ( self : Dict , _A : Any , _A : Tuple , _A : int ):
        '''simple docstring'''
        UpperCAmelCase__ : Optional[int] = self.num_labels
        UpperCAmelCase__ : Any = SwiftFormerForImageClassification(_A )
        model.to(_A )
        model.eval()
        UpperCAmelCase__ : List[Any] = model(_A , labels=_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

        UpperCAmelCase__ : str = SwiftFormerForImageClassification(_A )
        model.to(_A )
        model.eval()

        UpperCAmelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase__ : Dict = model(_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowercase_ ( self : int ):
        '''simple docstring'''
        ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[Any] = self.prepare_config_and_inputs()
        UpperCAmelCase__ : int = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_torch
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
    lowerCAmelCase__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    lowerCAmelCase__ = (
        {'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False

    def lowercase_ ( self : List[Any] ):
        '''simple docstring'''
        UpperCAmelCase__ : Optional[int] = SwiftFormerModelTester(self )
        UpperCAmelCase__ : Dict = ConfigTester(
            self , config_class=_A , has_text_modality=_A , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )

    def lowercase_ ( self : List[str] ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
    def lowercase_ ( self : Union[str, Any] ):
        '''simple docstring'''
        pass

    def lowercase_ ( self : str ):
        '''simple docstring'''
        UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            UpperCAmelCase__ : str = model_class(_A )
            UpperCAmelCase__ : Dict = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_A , nn.Linear ) )

    def lowercase_ ( self : Dict ):
        '''simple docstring'''
        UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            UpperCAmelCase__ : List[Any] = model_class(_A )
            UpperCAmelCase__ : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase__ : Optional[Any] = [*signature.parameters.keys()]

            UpperCAmelCase__ : Union[str, Any] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _A )

    def lowercase_ ( self : str ):
        '''simple docstring'''
        UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_A )

    def lowercase_ ( self : Dict ):
        '''simple docstring'''
        UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_A )

    @slow
    def lowercase_ ( self : Optional[Any] ):
        '''simple docstring'''
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase__ : List[str] = SwiftFormerModel.from_pretrained(_A )
            self.assertIsNotNone(_A )

    @unittest.skip(reason='''SwiftFormer does not output attentions''' )
    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        pass

    def lowercase_ ( self : int ):
        '''simple docstring'''

        def check_hidden_states_output(_A : Tuple , _A : Optional[Any] , _A : Union[str, Any] ):
            UpperCAmelCase__ : List[Any] = model_class(_A )
            model.to(_A )
            model.eval()

            with torch.no_grad():
                UpperCAmelCase__ : str = model(**self._prepare_for_class(_A , _A ) )

            UpperCAmelCase__ : Union[str, Any] = outputs.hidden_states

            UpperCAmelCase__ : List[Any] = 8
            self.assertEqual(len(_A ) , _A )  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(_A ) ):
                self.assertEqual(
                    hidden_states[i].shape ,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ] ) , )

        UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            UpperCAmelCase__ : Dict = True
            check_hidden_states_output(_A , _A , _A )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCAmelCase__ : int = True

            check_hidden_states_output(_A , _A , _A )

    def lowercase_ ( self : Dict ):
        '''simple docstring'''

        def _config_zero_init(_A : int ):
            UpperCAmelCase__ : Union[str, Any] = copy.deepcopy(_A )
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(_A , _A , 1e-10 )
                if isinstance(getattr(_A , _A , _A ) , _A ):
                    UpperCAmelCase__ : Any = _config_zero_init(getattr(_A , _A ) )
                    setattr(_A , _A , _A )
            return configs_no_init

        UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase__ : int = _config_zero_init(_A )
        for model_class in self.all_model_classes:
            UpperCAmelCase__ : Any = model_class(config=_A )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item() ,
                        [0.0, 1.0] ,
                        msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def lowercase_ ( self : str ):
        '''simple docstring'''
        pass


def a__ ( ) -> Union[str, Any]:
    UpperCAmelCase__ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
    @cached_property
    def lowercase_ ( self : List[Any] ):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None

    @slow
    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        UpperCAmelCase__ : Any = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(_A )
        UpperCAmelCase__ : Tuple = self.default_image_processor
        UpperCAmelCase__ : Any = prepare_img()
        UpperCAmelCase__ : str = image_processor(images=_A , return_tensors='''pt''' ).to(_A )

        # forward pass
        with torch.no_grad():
            UpperCAmelCase__ : Optional[int] = model(**_A )

        # verify the logits
        UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , _A )

        UpperCAmelCase__ : Optional[int] = torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(_A )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
299
'''simple docstring'''
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


UpperCamelCase__ = {'''configuration_mmbt''': ['''MMBTConfig''']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
299
1
'''simple docstring'''
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def a__ ( lowerCAmelCase__ ) -> List[str]:
    UpperCAmelCase__ : Tuple = {}

    UpperCAmelCase__ : Optional[int] = job['''started_at''']
    UpperCAmelCase__ : Optional[int] = job['''completed_at''']

    UpperCAmelCase__ : List[Any] = date_parser.parse(lowerCAmelCase__ )
    UpperCAmelCase__ : Dict = date_parser.parse(lowerCAmelCase__ )

    UpperCAmelCase__ : Optional[Any] = round((end_datetime - start_datetime).total_seconds() / 6_0.0 )

    UpperCAmelCase__ : Union[str, Any] = start
    UpperCAmelCase__ : Optional[Any] = end
    UpperCAmelCase__ : List[str] = duration_in_min

    return job_info


def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ) -> Union[str, Any]:
    UpperCAmelCase__ : Tuple = None
    if token is not None:
        UpperCAmelCase__ : Tuple = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"""Bearer {token}"""}

    UpperCAmelCase__ : int = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
    UpperCAmelCase__ : Any = requests.get(lowerCAmelCase__ , headers=lowerCAmelCase__ ).json()
    UpperCAmelCase__ : List[str] = {}

    try:
        job_time.update({job['''name''']: extract_time_from_single_job(lowerCAmelCase__ ) for job in result['''jobs''']} )
        UpperCAmelCase__ : Tuple = math.ceil((result['''total_count'''] - 1_00) / 1_00 )

        for i in range(lowerCAmelCase__ ):
            UpperCAmelCase__ : str = requests.get(url + F"""&page={i + 2}""" , headers=lowerCAmelCase__ ).json()
            job_time.update({job['''name''']: extract_time_from_single_job(lowerCAmelCase__ ) for job in result['''jobs''']} )

        return job_time
    except Exception:
        print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )

    return {}


if __name__ == "__main__":
    UpperCamelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
    UpperCamelCase__ = parser.parse_args()

    UpperCamelCase__ = get_job_time(args.workflow_run_id)
    UpperCamelCase__ = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(F"""{k}: {v['duration']}""")
299
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


if is_flax_available():
    import jax.numpy as jnp


class lowerCamelCase_ ( __a ):
    def __get__( self : str , _A : Tuple , _A : List[str]=None ):
        '''simple docstring'''
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError('''unreadable attribute''' )
        UpperCAmelCase__ : Union[str, Any] = '''__cached_''' + self.fget.__name__
        UpperCAmelCase__ : Any = getattr(_A , _A , _A )
        if cached is None:
            UpperCAmelCase__ : Dict = self.fget(_A )
            setattr(_A , _A , _A )
        return cached


def a__ ( lowerCAmelCase__ ) -> Optional[int]:
    UpperCAmelCase__ : Tuple = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(F"""invalid truth value {val!r}""" )


def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
    if is_torch_fx_proxy(lowerCAmelCase__ ):
        return True
    if is_torch_available():
        import torch

        if isinstance(lowerCAmelCase__ , torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(lowerCAmelCase__ , tf.Tensor ):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(lowerCAmelCase__ , (jnp.ndarray, Tracer) ):
            return True

    return isinstance(lowerCAmelCase__ , np.ndarray )


def a__ ( lowerCAmelCase__ ) -> Any:
    return isinstance(lowerCAmelCase__ , np.ndarray )


def a__ ( lowerCAmelCase__ ) -> int:
    return _is_numpy(lowerCAmelCase__ )


def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
    import torch

    return isinstance(lowerCAmelCase__ , torch.Tensor )


def a__ ( lowerCAmelCase__ ) -> List[str]:
    return False if not is_torch_available() else _is_torch(lowerCAmelCase__ )


def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
    import torch

    return isinstance(lowerCAmelCase__ , torch.device )


def a__ ( lowerCAmelCase__ ) -> List[str]:
    return False if not is_torch_available() else _is_torch_device(lowerCAmelCase__ )


def a__ ( lowerCAmelCase__ ) -> Any:
    import torch

    if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
        if hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
            UpperCAmelCase__ : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
        else:
            return False
    return isinstance(lowerCAmelCase__ , torch.dtype )


def a__ ( lowerCAmelCase__ ) -> Optional[int]:
    return False if not is_torch_available() else _is_torch_dtype(lowerCAmelCase__ )


def a__ ( lowerCAmelCase__ ) -> List[Any]:
    import tensorflow as tf

    return isinstance(lowerCAmelCase__ , tf.Tensor )


def a__ ( lowerCAmelCase__ ) -> List[str]:
    return False if not is_tf_available() else _is_tensorflow(lowerCAmelCase__ )


def a__ ( lowerCAmelCase__ ) -> Any:
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(lowerCAmelCase__ , '''is_symbolic_tensor''' ):
        return tf.is_symbolic_tensor(lowerCAmelCase__ )
    return type(lowerCAmelCase__ ) == tf.Tensor


def a__ ( lowerCAmelCase__ ) -> Union[str, Any]:
    return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCAmelCase__ )


def a__ ( lowerCAmelCase__ ) -> Tuple:
    import jax.numpy as jnp  # noqa: F811

    return isinstance(lowerCAmelCase__ , jnp.ndarray )


def a__ ( lowerCAmelCase__ ) -> List[Any]:
    return False if not is_flax_available() else _is_jax(lowerCAmelCase__ )


def a__ ( lowerCAmelCase__ ) -> Tuple:
    if isinstance(lowerCAmelCase__ , (dict, UserDict) ):
        return {k: to_py_obj(lowerCAmelCase__ ) for k, v in obj.items()}
    elif isinstance(lowerCAmelCase__ , (list, tuple) ):
        return [to_py_obj(lowerCAmelCase__ ) for o in obj]
    elif is_tf_tensor(lowerCAmelCase__ ):
        return obj.numpy().tolist()
    elif is_torch_tensor(lowerCAmelCase__ ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(lowerCAmelCase__ ):
        return np.asarray(lowerCAmelCase__ ).tolist()
    elif isinstance(lowerCAmelCase__ , (np.ndarray, np.number) ):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def a__ ( lowerCAmelCase__ ) -> Tuple:
    if isinstance(lowerCAmelCase__ , (dict, UserDict) ):
        return {k: to_numpy(lowerCAmelCase__ ) for k, v in obj.items()}
    elif isinstance(lowerCAmelCase__ , (list, tuple) ):
        return np.array(lowerCAmelCase__ )
    elif is_tf_tensor(lowerCAmelCase__ ):
        return obj.numpy()
    elif is_torch_tensor(lowerCAmelCase__ ):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(lowerCAmelCase__ ):
        return np.asarray(lowerCAmelCase__ )
    else:
        return obj


class lowerCamelCase_ ( __a ):
    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        UpperCAmelCase__ : List[str] = fields(self )

        # Safety and consistency checks
        if not len(_A ):
            raise ValueError(f"""{self.__class__.__name__} has no fields.""" )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" )

        UpperCAmelCase__ : Dict = getattr(self , class_fields[0].name )
        UpperCAmelCase__ : Any = all(getattr(self , field.name ) is None for field in class_fields[1:] )

        if other_fields_are_none and not is_tensor(_A ):
            if isinstance(_A , _A ):
                UpperCAmelCase__ : List[Any] = first_field.items()
                UpperCAmelCase__ : Optional[int] = True
            else:
                try:
                    UpperCAmelCase__ : Optional[int] = iter(_A )
                    UpperCAmelCase__ : Optional[int] = True
                except TypeError:
                    UpperCAmelCase__ : Optional[Any] = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(_A ):
                    if (
                        not isinstance(_A , (list, tuple) )
                        or not len(_A ) == 2
                        or not isinstance(element[0] , _A )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            UpperCAmelCase__ : List[Any] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        UpperCAmelCase__ : List[str] = element[1]
            elif first_field is not None:
                UpperCAmelCase__ : Optional[Any] = first_field
        else:
            for field in class_fields:
                UpperCAmelCase__ : Optional[int] = getattr(self , field.name )
                if v is not None:
                    UpperCAmelCase__ : str = v

    def __delitem__( self : Union[str, Any] , *_A : Any , **_A : str ):
        '''simple docstring'''
        raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )

    def lowercase_ ( self : Any , *_A : List[str] , **_A : Tuple ):
        '''simple docstring'''
        raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )

    def lowercase_ ( self : Optional[Any] , *_A : Any , **_A : Tuple ):
        '''simple docstring'''
        raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )

    def lowercase_ ( self : Optional[Any] , *_A : Dict , **_A : List[Any] ):
        '''simple docstring'''
        raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )

    def __getitem__( self : List[str] , _A : Any ):
        '''simple docstring'''
        if isinstance(_A , _A ):
            UpperCAmelCase__ : Union[str, Any] = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__( self : int , _A : Union[str, Any] , _A : str ):
        '''simple docstring'''
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(_A , _A )
        super().__setattr__(_A , _A )

    def __setitem__( self : Any , _A : Optional[int] , _A : List[str] ):
        '''simple docstring'''
        super().__setitem__(_A , _A )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(_A , _A )

    def lowercase_ ( self : Optional[Any] ):
        '''simple docstring'''
        return tuple(self[k] for k in self.keys() )


class lowerCamelCase_ ( __a , __a ):
    @classmethod
    def lowercase_ ( cls : Optional[Any] , _A : Optional[Any] ):
        '''simple docstring'''
        raise ValueError(
            f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" )


class lowerCamelCase_ ( __a ):
    lowerCAmelCase__ = 'longest'
    lowerCAmelCase__ = 'max_length'
    lowerCAmelCase__ = 'do_not_pad'


class lowerCamelCase_ ( __a ):
    lowerCAmelCase__ = 'pt'
    lowerCAmelCase__ = 'tf'
    lowerCAmelCase__ = 'np'
    lowerCAmelCase__ = 'jax'


class lowerCamelCase_ :
    def __init__( self : List[Any] , _A : List[ContextManager] ):
        '''simple docstring'''
        UpperCAmelCase__ : str = context_managers
        UpperCAmelCase__ : int = ExitStack()

    def __enter__( self : str ):
        '''simple docstring'''
        for context_manager in self.context_managers:
            self.stack.enter_context(_A )

    def __exit__( self : Dict , *_A : List[Any] , **_A : str ):
        '''simple docstring'''
        self.stack.__exit__(*_A , **_A )


def a__ ( lowerCAmelCase__ ) -> Any:
    UpperCAmelCase__ : int = infer_framework(lowerCAmelCase__ )
    if framework == "tf":
        UpperCAmelCase__ : Optional[Any] = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        UpperCAmelCase__ : List[Any] = inspect.signature(model_class.forward )  # PyTorch models
    else:
        UpperCAmelCase__ : List[Any] = inspect.signature(model_class.__call__ )  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def a__ ( lowerCAmelCase__ ) -> Optional[int]:
    UpperCAmelCase__ : Dict = model_class.__name__
    UpperCAmelCase__ : Union[str, Any] = infer_framework(lowerCAmelCase__ )
    if framework == "tf":
        UpperCAmelCase__ : Tuple = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        UpperCAmelCase__ : List[str] = inspect.signature(model_class.forward )  # PyTorch models
    else:
        UpperCAmelCase__ : int = inspect.signature(model_class.__call__ )  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = "" , lowerCAmelCase__ = "." ) -> Any:
    def _flatten_dict(lowerCAmelCase__ , lowerCAmelCase__="" , lowerCAmelCase__="." ):
        for k, v in d.items():
            UpperCAmelCase__ : int = str(lowerCAmelCase__ ) + delimiter + str(lowerCAmelCase__ ) if parent_key else k
            if v and isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
                yield from flatten_dict(lowerCAmelCase__ , lowerCAmelCase__ , delimiter=lowerCAmelCase__ ).items()
            else:
                yield key, v

    return dict(_flatten_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) )


@contextmanager
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = False ) -> int:
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir


def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ) -> Optional[Any]:
    if is_numpy_array(lowerCAmelCase__ ):
        return np.transpose(lowerCAmelCase__ , axes=lowerCAmelCase__ )
    elif is_torch_tensor(lowerCAmelCase__ ):
        return array.T if axes is None else array.permute(*lowerCAmelCase__ )
    elif is_tf_tensor(lowerCAmelCase__ ):
        import tensorflow as tf

        return tf.transpose(lowerCAmelCase__ , perm=lowerCAmelCase__ )
    elif is_jax_tensor(lowerCAmelCase__ ):
        return jnp.transpose(lowerCAmelCase__ , axes=lowerCAmelCase__ )
    else:
        raise ValueError(F"""Type not supported for transpose: {type(lowerCAmelCase__ )}.""" )


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
    if is_numpy_array(lowerCAmelCase__ ):
        return np.reshape(lowerCAmelCase__ , lowerCAmelCase__ )
    elif is_torch_tensor(lowerCAmelCase__ ):
        return array.reshape(*lowerCAmelCase__ )
    elif is_tf_tensor(lowerCAmelCase__ ):
        import tensorflow as tf

        return tf.reshape(lowerCAmelCase__ , lowerCAmelCase__ )
    elif is_jax_tensor(lowerCAmelCase__ ):
        return jnp.reshape(lowerCAmelCase__ , lowerCAmelCase__ )
    else:
        raise ValueError(F"""Type not supported for reshape: {type(lowerCAmelCase__ )}.""" )


def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ) -> List[Any]:
    if is_numpy_array(lowerCAmelCase__ ):
        return np.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ )
    elif is_torch_tensor(lowerCAmelCase__ ):
        return array.squeeze() if axis is None else array.squeeze(dim=lowerCAmelCase__ )
    elif is_tf_tensor(lowerCAmelCase__ ):
        import tensorflow as tf

        return tf.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ )
    elif is_jax_tensor(lowerCAmelCase__ ):
        return jnp.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ )
    else:
        raise ValueError(F"""Type not supported for squeeze: {type(lowerCAmelCase__ )}.""" )


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
    if is_numpy_array(lowerCAmelCase__ ):
        return np.expand_dims(lowerCAmelCase__ , lowerCAmelCase__ )
    elif is_torch_tensor(lowerCAmelCase__ ):
        return array.unsqueeze(dim=lowerCAmelCase__ )
    elif is_tf_tensor(lowerCAmelCase__ ):
        import tensorflow as tf

        return tf.expand_dims(lowerCAmelCase__ , axis=lowerCAmelCase__ )
    elif is_jax_tensor(lowerCAmelCase__ ):
        return jnp.expand_dims(lowerCAmelCase__ , axis=lowerCAmelCase__ )
    else:
        raise ValueError(F"""Type not supported for expand_dims: {type(lowerCAmelCase__ )}.""" )


def a__ ( lowerCAmelCase__ ) -> int:
    if is_numpy_array(lowerCAmelCase__ ):
        return np.size(lowerCAmelCase__ )
    elif is_torch_tensor(lowerCAmelCase__ ):
        return array.numel()
    elif is_tf_tensor(lowerCAmelCase__ ):
        import tensorflow as tf

        return tf.size(lowerCAmelCase__ )
    elif is_jax_tensor(lowerCAmelCase__ ):
        return array.size
    else:
        raise ValueError(F"""Type not supported for expand_dims: {type(lowerCAmelCase__ )}.""" )


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
    for key, value in auto_map.items():
        if isinstance(lowerCAmelCase__ , (tuple, list) ):
            UpperCAmelCase__ : int = [F"""{repo_id}--{v}""" if (v is not None and '''--''' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            UpperCAmelCase__ : str = F"""{repo_id}--{value}"""

    return auto_map


def a__ ( lowerCAmelCase__ ) -> Tuple:
    for base_class in inspect.getmro(lowerCAmelCase__ ):
        UpperCAmelCase__ : Optional[int] = base_class.__module__
        UpperCAmelCase__ : Optional[int] = base_class.__name__
        if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('''torch''' ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(F"""Could not infer framework from class {model_class}.""" )
299
1
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch

import numpy as np
import pytest
from absl.testing import parameterized

import datasets
from datasets import load_metric

from .utils import for_all_test_methods, local, slow


# mark all tests as integration
UpperCamelCase__ = pytest.mark.integration

UpperCamelCase__ = {'''comet'''}
UpperCamelCase__ = importlib.util.find_spec('''fairseq''') is not None
UpperCamelCase__ = {'''code_eval'''}
UpperCamelCase__ = os.name == '''nt'''
UpperCamelCase__ = {'''bertscore''', '''frugalscore''', '''perplexity'''}
UpperCamelCase__ = importlib.util.find_spec('''transformers''') is not None


def a__ ( lowerCAmelCase__ ) -> str:
    @wraps(lowerCAmelCase__ )
    def wrapper(self , lowerCAmelCase__ ):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('''"test requires Fairseq"''' )
        else:
            test_case(self , lowerCAmelCase__ )

    return wrapper


def a__ ( lowerCAmelCase__ ) -> List[Any]:
    @wraps(lowerCAmelCase__ )
    def wrapper(self , lowerCAmelCase__ ):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('''"test requires transformers"''' )
        else:
            test_case(self , lowerCAmelCase__ )

    return wrapper


def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
    @wraps(lowerCAmelCase__ )
    def wrapper(self , lowerCAmelCase__ ):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('''"test not supported on Windows"''' )
        else:
            test_case(self , lowerCAmelCase__ )

    return wrapper


def a__ ( ) -> Dict:
    UpperCAmelCase__ : str = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods( __a , __a , __a )
@local
class lowerCamelCase_ ( parameterized.TestCase ):
    lowerCAmelCase__ = {}
    lowerCAmelCase__ = None

    @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
    @pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' )
    def lowercase_ ( self : List[Any] , _A : int ):
        '''simple docstring'''
        UpperCAmelCase__ : Optional[int] = '''[...]'''
        UpperCAmelCase__ : Any = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('''metrics''' , _A ) ).module_path )
        UpperCAmelCase__ : Optional[Any] = datasets.load.import_main_class(metric_module.__name__ , dataset=_A )
        # check parameters
        UpperCAmelCase__ : Optional[int] = inspect.signature(metric._compute ).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) )  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(_A , metric_module.__name__ ):
            with self.use_local_metrics():
                try:
                    UpperCAmelCase__ : Union[str, Any] = doctest.testmod(_A , verbose=_A , raise_on_error=_A )
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )

    @slow
    def lowercase_ ( self : Dict , _A : str ):
        '''simple docstring'''
        UpperCAmelCase__ : List[str] = '''[...]'''
        UpperCAmelCase__ : Tuple = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('''metrics''' , _A ) ).module_path )
        # run doctest
        with self.use_local_metrics():
            UpperCAmelCase__ : Dict = doctest.testmod(_A , verbose=_A , raise_on_error=_A )
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )

    @contextmanager
    def lowercase_ ( self : Tuple , _A : Optional[int] , _A : Tuple ):
        '''simple docstring'''
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](_A ):
                yield
        else:
            yield

    @contextmanager
    def lowercase_ ( self : List[str] ):
        '''simple docstring'''
        def load_local_metric(_A : str , *_A : Dict , **_A : str ):
            return load_metric(os.path.join('''metrics''' , _A ) , *_A , **_A )

        with patch('''datasets.load_metric''' ) as mock_load_metric:
            UpperCAmelCase__ : Union[str, Any] = load_local_metric
            yield

    @classmethod
    def lowercase_ ( cls : Union[str, Any] , _A : Optional[int] ):
        '''simple docstring'''
        def wrapper(_A : Optional[Any] ):
            UpperCAmelCase__ : List[str] = contextmanager(_A )
            UpperCAmelCase__ : Optional[Any] = patcher
            return patcher

        return wrapper


@LocalMetricTest.register_intensive_calls_patcher('''bleurt''' )
def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
    import tensorflow.compat.va as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string('''sv''' , '''''' , '''''' )  # handle pytest cli flags

    class lowerCamelCase_ ( __a ):
        def lowercase_ ( self : Any , _A : Dict ):
            '''simple docstring'''
            assert len(input_dict['''input_ids'''] ) == 2
            return np.array([1.0_3, 1.0_4] )

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor:
        UpperCAmelCase__ : Optional[Any] = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher('''bertscore''' )
def a__ ( lowerCAmelCase__ ) -> Optional[int]:
    import torch

    def bert_cos_score_idf(lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ ):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(lowerCAmelCase__ ) )

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch('''bert_score.scorer.get_model''' ), patch(
        '''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf:
        UpperCAmelCase__ : List[str] = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher('''comet''' )
def a__ ( lowerCAmelCase__ ) -> Optional[int]:
    def load_from_checkpoint(lowerCAmelCase__ ):
        class lowerCamelCase_ :
            def lowercase_ ( self : str , _A : Tuple , *_A : Dict , **_A : Tuple ):
                '''simple docstring'''
                assert len(_A ) == 2
                UpperCAmelCase__ : List[Any] = [0.1_9, 0.9_2]
                return scores, sum(_A ) / len(_A )

        return Model()

    # mock load_from_checkpoint which is supposed to do download a bert model
    # mock load_from_checkpoint which is supposed to do download a bert model
    with patch('''comet.download_model''' ) as mock_download_model:
        UpperCAmelCase__ : List[Any] = None
        with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint:
            UpperCAmelCase__ : Optional[Any] = load_from_checkpoint
            yield


def a__ ( ) -> List[str]:
    UpperCAmelCase__ : Union[str, Any] = load_metric(os.path.join('''metrics''' , '''seqeval''' ) )
    UpperCAmelCase__ : List[Any] = '''ERROR'''
    UpperCAmelCase__ : List[str] = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
    with pytest.raises(lowerCAmelCase__ , match=re.escape(lowerCAmelCase__ ) ):
        metric.compute(predictions=[] , references=[] , scheme=lowerCAmelCase__ )
299
'''simple docstring'''
import argparse
from typing import List

import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset

# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


UpperCamelCase__ = 1_6
UpperCamelCase__ = 3_2


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 16 ) -> Dict:
    UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    UpperCAmelCase__ : str = DatasetDict(
        {
            '''train''': dataset['''train'''].select(lowerCAmelCase__ ),
            '''validation''': dataset['''train'''].select(lowerCAmelCase__ ),
            '''test''': dataset['''validation'''],
        } )

    def tokenize_function(lowerCAmelCase__ ):
        # max_length=None => use the model max length (it's actually the default)
        UpperCAmelCase__ : Optional[int] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        UpperCAmelCase__ : Dict = datasets.map(
            lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    UpperCAmelCase__ : int = tokenized_datasets.rename_column('''label''' , '''labels''' )

    def collate_fn(lowerCAmelCase__ ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        UpperCAmelCase__ : Optional[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            UpperCAmelCase__ : Any = 16
        elif accelerator.mixed_precision != "no":
            UpperCAmelCase__ : Dict = 8
        else:
            UpperCAmelCase__ : List[Any] = None

        return tokenizer.pad(
            lowerCAmelCase__ , padding='''longest''' , max_length=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_tensors='''pt''' , )

    # Instantiate dataloaders.
    UpperCAmelCase__ : List[Any] = DataLoader(
        tokenized_datasets['''train'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
    UpperCAmelCase__ : List[str] = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
    UpperCAmelCase__ : List[Any] = DataLoader(
        tokenized_datasets['''test'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )

    return train_dataloader, eval_dataloader, test_dataloader


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
    # New Code #
    UpperCAmelCase__ : List[str] = []
    # Download the dataset
    UpperCAmelCase__ : Union[str, Any] = load_dataset('''glue''' , '''mrpc''' )
    # Create our splits
    UpperCAmelCase__ : str = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    UpperCAmelCase__ : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    UpperCAmelCase__ : Any = config['''lr''']
    UpperCAmelCase__ : Any = int(config['''num_epochs'''] )
    UpperCAmelCase__ : Any = int(config['''seed'''] )
    UpperCAmelCase__ : Dict = int(config['''batch_size'''] )
    UpperCAmelCase__ : Any = evaluate.load('''glue''' , '''mrpc''' )

    # If the batch size is too big we use gradient accumulation
    UpperCAmelCase__ : Optional[Any] = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        UpperCAmelCase__ : Any = batch_size // MAX_GPU_BATCH_SIZE
        UpperCAmelCase__ : List[Any] = MAX_GPU_BATCH_SIZE

    set_seed(lowerCAmelCase__ )

    # New Code #
    # Create our folds:
    UpperCAmelCase__ : Union[str, Any] = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] )
    UpperCAmelCase__ : Dict = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(lowerCAmelCase__ ):
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = get_fold_dataloaders(
            lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        UpperCAmelCase__ : List[str] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowerCAmelCase__ )

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        UpperCAmelCase__ : Optional[Any] = model.to(accelerator.device )

        # Instantiate optimizer
        UpperCAmelCase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=lowerCAmelCase__ )

        # Instantiate scheduler
        UpperCAmelCase__ : Any = get_linear_schedule_with_warmup(
            optimizer=lowerCAmelCase__ , num_warmup_steps=1_00 , num_training_steps=(len(lowerCAmelCase__ ) * num_epochs) // gradient_accumulation_steps , )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = accelerator.prepare(
            lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )

        # Now we train the model
        for epoch in range(lowerCAmelCase__ ):
            model.train()
            for step, batch in enumerate(lowerCAmelCase__ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                UpperCAmelCase__ : Union[str, Any] = model(**lowerCAmelCase__ )
                UpperCAmelCase__ : Dict = outputs.loss
                UpperCAmelCase__ : Dict = loss / gradient_accumulation_steps
                accelerator.backward(lowerCAmelCase__ )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(lowerCAmelCase__ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    UpperCAmelCase__ : str = model(**lowerCAmelCase__ )
                UpperCAmelCase__ : Any = outputs.logits.argmax(dim=-1 )
                UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
                metric.add_batch(
                    predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , )

            UpperCAmelCase__ : str = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"""epoch {epoch}:""" , lowerCAmelCase__ )

        # New Code #
        # We also run predictions on the test set at the very end
        UpperCAmelCase__ : int = []
        for step, batch in enumerate(lowerCAmelCase__ ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                UpperCAmelCase__ : str = model(**lowerCAmelCase__ )
            UpperCAmelCase__ : Union[str, Any] = outputs.logits
            UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu() )

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(lowerCAmelCase__ , dim=0 ) )
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    UpperCAmelCase__ : Union[str, Any] = torch.cat(lowerCAmelCase__ , dim=0 )
    UpperCAmelCase__ : Tuple = torch.stack(lowerCAmelCase__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    UpperCAmelCase__ : Optional[Any] = metric.compute(predictions=lowerCAmelCase__ , references=lowerCAmelCase__ )
    accelerator.print('''Average test metrics from all folds:''' , lowerCAmelCase__ )


def a__ ( ) -> Any:
    UpperCAmelCase__ : Tuple = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' ,
        type=lowerCAmelCase__ ,
        default=lowerCAmelCase__ ,
        choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,
        help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    # New Code #
    parser.add_argument('''--num_folds''' , type=lowerCAmelCase__ , default=3 , help='''The number of splits to perform across the dataset''' )
    UpperCAmelCase__ : Tuple = parser.parse_args()
    UpperCAmelCase__ : Any = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(lowerCAmelCase__ , lowerCAmelCase__ )


if __name__ == "__main__":
    main()
299
1
'''simple docstring'''
import gc
import math
import unittest

import torch

from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


UpperCamelCase__ = logging.get_logger(__name__)

enable_full_determinism()


class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
    lowerCAmelCase__ = UNetaDModel
    lowerCAmelCase__ = 'sample'

    @property
    def lowercase_ ( self : Union[str, Any] ):
        '''simple docstring'''
        UpperCAmelCase__ : Optional[Any] = 4
        UpperCAmelCase__ : str = 3
        UpperCAmelCase__ : str = (32, 32)

        UpperCAmelCase__ : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
        UpperCAmelCase__ : Tuple = torch.tensor([10] ).to(_A )

        return {"sample": noise, "timestep": time_step}

    @property
    def lowercase_ ( self : int ):
        '''simple docstring'''
        return (3, 32, 32)

    @property
    def lowercase_ ( self : Dict ):
        '''simple docstring'''
        return (3, 32, 32)

    def lowercase_ ( self : Union[str, Any] ):
        '''simple docstring'''
        UpperCAmelCase__ : Tuple = {
            '''block_out_channels''': (32, 64),
            '''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
            '''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
            '''attention_head_dim''': 3,
            '''out_channels''': 3,
            '''in_channels''': 3,
            '''layers_per_block''': 2,
            '''sample_size''': 32,
        }
        UpperCAmelCase__ : Tuple = self.dummy_input
        return init_dict, inputs_dict


class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
    lowerCAmelCase__ = UNetaDModel
    lowerCAmelCase__ = 'sample'

    @property
    def lowercase_ ( self : Any ):
        '''simple docstring'''
        UpperCAmelCase__ : List[str] = 4
        UpperCAmelCase__ : Dict = 4
        UpperCAmelCase__ : List[str] = (32, 32)

        UpperCAmelCase__ : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
        UpperCAmelCase__ : List[Any] = torch.tensor([10] ).to(_A )

        return {"sample": noise, "timestep": time_step}

    @property
    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        return (4, 32, 32)

    @property
    def lowercase_ ( self : List[str] ):
        '''simple docstring'''
        return (4, 32, 32)

    def lowercase_ ( self : Dict ):
        '''simple docstring'''
        UpperCAmelCase__ : List[str] = {
            '''sample_size''': 32,
            '''in_channels''': 4,
            '''out_channels''': 4,
            '''layers_per_block''': 2,
            '''block_out_channels''': (32, 64),
            '''attention_head_dim''': 32,
            '''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
            '''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
        }
        UpperCAmelCase__ : Optional[Any] = self.dummy_input
        return init_dict, inputs_dict

    def lowercase_ ( self : Any ):
        '''simple docstring'''
        UpperCAmelCase__ , UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )

        self.assertIsNotNone(_A )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )

        model.to(_A )
        UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
    def lowercase_ ( self : Optional[Any] ):
        '''simple docstring'''
        UpperCAmelCase__ , UpperCAmelCase__ : Any = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
        model.to(_A )
        UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
    def lowercase_ ( self : Optional[Any] ):
        '''simple docstring'''
        UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
        model_accelerate.to(_A )
        model_accelerate.eval()

        UpperCAmelCase__ : Tuple = torch.randn(
            1 ,
            model_accelerate.config.in_channels ,
            model_accelerate.config.sample_size ,
            model_accelerate.config.sample_size ,
            generator=torch.manual_seed(0 ) , )
        UpperCAmelCase__ : Union[str, Any] = noise.to(_A )
        UpperCAmelCase__ : Optional[Any] = torch.tensor([10] * noise.shape[0] ).to(_A )

        UpperCAmelCase__ : Any = model_accelerate(_A , _A )['''sample''']

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        UpperCAmelCase__ , UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained(
            '''fusing/unet-ldm-dummy-update''' , output_loading_info=_A , low_cpu_mem_usage=_A )
        model_normal_load.to(_A )
        model_normal_load.eval()
        UpperCAmelCase__ : Optional[int] = model_normal_load(_A , _A )['''sample''']

        assert torch_all_close(_A , _A , rtol=1e-3 )

    def lowercase_ ( self : Optional[Any] ):
        '''simple docstring'''
        UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
        model.eval()
        model.to(_A )

        UpperCAmelCase__ : Union[str, Any] = torch.randn(
            1 ,
            model.config.in_channels ,
            model.config.sample_size ,
            model.config.sample_size ,
            generator=torch.manual_seed(0 ) , )
        UpperCAmelCase__ : str = noise.to(_A )
        UpperCAmelCase__ : str = torch.tensor([10] * noise.shape[0] ).to(_A )

        with torch.no_grad():
            UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample

        UpperCAmelCase__ : List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        UpperCAmelCase__ : Tuple = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
        # fmt: on

        self.assertTrue(torch_all_close(_A , _A , rtol=1e-3 ) )


class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
    lowerCAmelCase__ = UNetaDModel
    lowerCAmelCase__ = 'sample'

    @property
    def lowercase_ ( self : Any , _A : str=(32, 32) ):
        '''simple docstring'''
        UpperCAmelCase__ : Tuple = 4
        UpperCAmelCase__ : List[str] = 3

        UpperCAmelCase__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
        UpperCAmelCase__ : Dict = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_A )

        return {"sample": noise, "timestep": time_step}

    @property
    def lowercase_ ( self : List[str] ):
        '''simple docstring'''
        return (3, 32, 32)

    @property
    def lowercase_ ( self : List[Any] ):
        '''simple docstring'''
        return (3, 32, 32)

    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        UpperCAmelCase__ : List[str] = {
            '''block_out_channels''': [32, 64, 64, 64],
            '''in_channels''': 3,
            '''layers_per_block''': 1,
            '''out_channels''': 3,
            '''time_embedding_type''': '''fourier''',
            '''norm_eps''': 1e-6,
            '''mid_block_scale_factor''': math.sqrt(2.0 ),
            '''norm_num_groups''': None,
            '''down_block_types''': [
                '''SkipDownBlock2D''',
                '''AttnSkipDownBlock2D''',
                '''SkipDownBlock2D''',
                '''SkipDownBlock2D''',
            ],
            '''up_block_types''': [
                '''SkipUpBlock2D''',
                '''SkipUpBlock2D''',
                '''AttnSkipUpBlock2D''',
                '''SkipUpBlock2D''',
            ],
        }
        UpperCAmelCase__ : Tuple = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        UpperCAmelCase__ , UpperCAmelCase__ : str = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_A )
        self.assertIsNotNone(_A )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )

        model.to(_A )
        UpperCAmelCase__ : List[str] = self.dummy_input
        UpperCAmelCase__ : Dict = floats_tensor((4, 3) + (256, 256) ).to(_A )
        UpperCAmelCase__ : Optional[Any] = noise
        UpperCAmelCase__ : Any = model(**_A )

        assert image is not None, "Make sure output is not None"

    @slow
    def lowercase_ ( self : Dict ):
        '''simple docstring'''
        UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
        model.to(_A )

        UpperCAmelCase__ : Optional[Any] = 4
        UpperCAmelCase__ : List[str] = 3
        UpperCAmelCase__ : Dict = (256, 256)

        UpperCAmelCase__ : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(_A )
        UpperCAmelCase__ : Union[str, Any] = torch.tensor(batch_size * [1e-4] ).to(_A )

        with torch.no_grad():
            UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample

        UpperCAmelCase__ : Any = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        UpperCAmelCase__ : Tuple = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
        # fmt: on

        self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) )

    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
        model.to(_A )

        UpperCAmelCase__ : str = 4
        UpperCAmelCase__ : Any = 3
        UpperCAmelCase__ : int = (32, 32)

        UpperCAmelCase__ : Optional[Any] = torch.ones((batch_size, num_channels) + sizes ).to(_A )
        UpperCAmelCase__ : Optional[Any] = torch.tensor(batch_size * [1e-4] ).to(_A )

        with torch.no_grad():
            UpperCAmelCase__ : int = model(_A , _A ).sample

        UpperCAmelCase__ : Dict = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        UpperCAmelCase__ : Any = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
        # fmt: on

        self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) )

    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        pass
299
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path

import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized

from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wavaveca.test_feature_extraction_wavaveca import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
    from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput

if is_torch_available():
    from transformers import WavaVecaForCTC


@require_pyctcdecode
class lowerCamelCase_ ( unittest.TestCase ):
    def lowercase_ ( self : Optional[Any] ):
        '''simple docstring'''
        UpperCAmelCase__ : Any = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
        UpperCAmelCase__ : Optional[Any] = dict(zip(_A , range(len(_A ) ) ) )

        UpperCAmelCase__ : Tuple = {
            '''unk_token''': '''<unk>''',
            '''bos_token''': '''<s>''',
            '''eos_token''': '''</s>''',
        }

        UpperCAmelCase__ : Optional[int] = {
            '''feature_size''': 1,
            '''padding_value''': 0.0,
            '''sampling_rate''': 16_000,
            '''return_attention_mask''': False,
            '''do_normalize''': True,
        }

        UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp()
        UpperCAmelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        UpperCAmelCase__ : int = os.path.join(self.tmpdirname , _A )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(_A ) + '''\n''' )

        with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(_A ) + '''\n''' )

        # load decoder from hub
        UpperCAmelCase__ : Any = '''hf-internal-testing/ngram-beam-search-decoder'''

    def lowercase_ ( self : int , **_A : Optional[int] ):
        '''simple docstring'''
        UpperCAmelCase__ : Dict = self.add_kwargs_tokens_map.copy()
        kwargs.update(_A )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_A )

    def lowercase_ ( self : str , **_A : Any ):
        '''simple docstring'''
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_A )

    def lowercase_ ( self : str , **_A : Any ):
        '''simple docstring'''
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_A )

    def lowercase_ ( self : Any ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )

    def lowercase_ ( self : Dict ):
        '''simple docstring'''
        UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer()
        UpperCAmelCase__ : Any = self.get_feature_extractor()
        UpperCAmelCase__ : Tuple = self.get_decoder()

        UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )

        processor.save_pretrained(self.tmpdirname )
        UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , _A )

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , _A )

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set ,
            decoder.model_container[decoder._model_key]._unigram_set , )
        self.assertIsInstance(processor.decoder , _A )

    def lowercase_ ( self : int ):
        '''simple docstring'''
        UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
        processor.save_pretrained(self.tmpdirname )

        # make sure that error is thrown when decoder alphabet doesn't match
        UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )

        # decoder
        self.assertEqual(processor.language_model.alpha , 5.0 )
        self.assertEqual(processor.language_model.beta , 3.0 )
        self.assertEqual(processor.language_model.score_boundary , -7.0 )
        self.assertEqual(processor.language_model.unk_score_offset , 3 )

    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(['''xx'''] )
        with self.assertRaisesRegex(_A , '''include''' ):
            WavaVecaProcessorWithLM(
                tokenizer=_A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )

    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        UpperCAmelCase__ : Any = self.get_feature_extractor()
        UpperCAmelCase__ : Optional[int] = self.get_tokenizer()
        UpperCAmelCase__ : Any = self.get_decoder()

        UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )

        UpperCAmelCase__ : List[Any] = floats_list((3, 1_000) )

        UpperCAmelCase__ : Dict = feature_extractor(_A , return_tensors='''np''' )
        UpperCAmelCase__ : str = processor(_A , return_tensors='''np''' )

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        UpperCAmelCase__ : Optional[int] = self.get_feature_extractor()
        UpperCAmelCase__ : str = self.get_tokenizer()
        UpperCAmelCase__ : str = self.get_decoder()

        UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )

        UpperCAmelCase__ : Union[str, Any] = '''This is a test string'''

        UpperCAmelCase__ : Optional[int] = processor(text=_A )
        UpperCAmelCase__ : List[str] = tokenizer(_A )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def lowercase_ ( self : Dict , _A : Optional[int]=(2, 10, 16) , _A : List[str]=77 ):
        '''simple docstring'''
        np.random.seed(_A )
        return np.random.rand(*_A )

    def lowercase_ ( self : Any ):
        '''simple docstring'''
        UpperCAmelCase__ : Optional[int] = self.get_feature_extractor()
        UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer()
        UpperCAmelCase__ : Optional[Any] = self.get_decoder()

        UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )

        UpperCAmelCase__ : int = self._get_dummy_logits(shape=(10, 16) , seed=13 )

        UpperCAmelCase__ : List[Any] = processor.decode(_A )

        UpperCAmelCase__ : List[Any] = decoder.decode_beams(_A )[0]

        self.assertEqual(decoded_decoder[0] , decoded_processor.text )
        self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
        self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
        self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )

    @parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
    def lowercase_ ( self : Any , _A : str ):
        '''simple docstring'''
        UpperCAmelCase__ : Any = self.get_feature_extractor()
        UpperCAmelCase__ : Tuple = self.get_tokenizer()
        UpperCAmelCase__ : Tuple = self.get_decoder()

        UpperCAmelCase__ : Any = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )

        UpperCAmelCase__ : Optional[Any] = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(_A )
        else:
            with get_context(_A ).Pool() as pool:
                UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(_A , _A )

        UpperCAmelCase__ : str = list(_A )

        with get_context('''fork''' ).Pool() as p:
            UpperCAmelCase__ : Dict = decoder.decode_beams_batch(_A , _A )

        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0] )
            logit_scores_decoder.append(beams[0][-2] )
            lm_scores_decoder.append(beams[0][-1] )

        self.assertListEqual(_A , decoded_processor.text )
        self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
        self.assertListEqual(_A , decoded_processor.logit_score )
        self.assertListEqual(_A , decoded_processor.lm_score )

    def lowercase_ ( self : Union[str, Any] ):
        '''simple docstring'''
        UpperCAmelCase__ : str = self.get_feature_extractor()
        UpperCAmelCase__ : List[Any] = self.get_tokenizer()
        UpperCAmelCase__ : int = self.get_decoder()

        UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )

        UpperCAmelCase__ : str = self._get_dummy_logits()

        UpperCAmelCase__ : Optional[int] = 15
        UpperCAmelCase__ : Dict = -2_0.0
        UpperCAmelCase__ : Optional[Any] = -4.0

        UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(
            _A ,
            beam_width=_A ,
            beam_prune_logp=_A ,
            token_min_logp=_A , )
        UpperCAmelCase__ : List[Any] = decoded_processor_out.text

        UpperCAmelCase__ : List[str] = list(_A )

        with get_context('''fork''' ).Pool() as pool:
            UpperCAmelCase__ : Tuple = decoder.decode_beams_batch(
                _A ,
                _A ,
                beam_width=_A ,
                beam_prune_logp=_A ,
                token_min_logp=_A , )

        UpperCAmelCase__ : Optional[int] = [d[0][0] for d in decoded_decoder_out]
        UpperCAmelCase__ : Optional[Any] = [d[0][2] for d in decoded_decoder_out]
        UpperCAmelCase__ : Optional[int] = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(_A , _A )
        self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _A )

        self.assertTrue(np.array_equal(_A , decoded_processor_out.logit_score ) )
        self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , _A , atol=1e-3 ) )

        self.assertTrue(np.array_equal(_A , decoded_processor_out.lm_score ) )
        self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , _A , atol=1e-3 ) )

    def lowercase_ ( self : str ):
        '''simple docstring'''
        UpperCAmelCase__ : Optional[Any] = self.get_feature_extractor()
        UpperCAmelCase__ : Optional[Any] = self.get_tokenizer()
        UpperCAmelCase__ : Dict = self.get_decoder()

        UpperCAmelCase__ : int = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )

        UpperCAmelCase__ : Optional[int] = self._get_dummy_logits()

        UpperCAmelCase__ : List[str] = 2.0
        UpperCAmelCase__ : Union[str, Any] = 5.0
        UpperCAmelCase__ : str = -2_0.0
        UpperCAmelCase__ : Optional[int] = True

        UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(
            _A ,
            alpha=_A ,
            beta=_A ,
            unk_score_offset=_A ,
            lm_score_boundary=_A , )
        UpperCAmelCase__ : Union[str, Any] = decoded_processor_out.text

        UpperCAmelCase__ : Tuple = list(_A )
        decoder.reset_params(
            alpha=_A ,
            beta=_A ,
            unk_score_offset=_A ,
            lm_score_boundary=_A , )

        with get_context('''fork''' ).Pool() as pool:
            UpperCAmelCase__ : Optional[Any] = decoder.decode_beams_batch(
                _A ,
                _A , )

        UpperCAmelCase__ : str = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(_A , _A )
        self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _A )

        UpperCAmelCase__ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha , 2.0 )
        self.assertEqual(lm_model.beta , 5.0 )
        self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
        self.assertEqual(lm_model.score_boundary , _A )

    def lowercase_ ( self : int ):
        '''simple docstring'''
        UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        UpperCAmelCase__ : Dict = processor.decoder.model_container[processor.decoder._model_key]
        UpperCAmelCase__ : Optional[int] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()

        UpperCAmelCase__ : Dict = os.listdir(_A )
        UpperCAmelCase__ : Optional[Any] = ['''alphabet.json''', '''language_model''']

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(_A , _A )

    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        UpperCAmelCase__ : str = snapshot_download('''hf-internal-testing/processor_with_lm''' )
        UpperCAmelCase__ : Any = WavaVecaProcessorWithLM.from_pretrained(_A )
        UpperCAmelCase__ : Optional[int] = processor.decoder.model_container[processor.decoder._model_key]
        UpperCAmelCase__ : str = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()

        UpperCAmelCase__ : List[str] = os.listdir(_A )
        UpperCAmelCase__ : Any = os.listdir(_A )

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(_A , _A )

    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        UpperCAmelCase__ : Dict = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )

        UpperCAmelCase__ : Tuple = floats_list((3, 1_000) )

        UpperCAmelCase__ : int = processor_wavaveca(_A , return_tensors='''np''' )
        UpperCAmelCase__ : List[str] = processor_auto(_A , return_tensors='''np''' )

        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )

        UpperCAmelCase__ : Tuple = self._get_dummy_logits()

        UpperCAmelCase__ : List[str] = processor_wavaveca.batch_decode(_A )
        UpperCAmelCase__ : int = processor_auto.batch_decode(_A )

        self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )

    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        UpperCAmelCase__ : int = self.get_feature_extractor()
        UpperCAmelCase__ : int = self.get_tokenizer()
        UpperCAmelCase__ : Optional[Any] = self.get_decoder()

        UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
        self.assertListEqual(
            processor.model_input_names ,
            feature_extractor.model_input_names ,
            msg='''`processor` and `feature_extractor` model input names do not match''' , )

    @staticmethod
    def lowercase_ ( _A : Dict , _A : str ):
        '''simple docstring'''
        UpperCAmelCase__ : int = [d[key] for d in offsets]
        return retrieved_list

    def lowercase_ ( self : Any ):
        '''simple docstring'''
        UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        UpperCAmelCase__ : str = self._get_dummy_logits()[0]
        UpperCAmelCase__ : List[str] = processor.decode(_A , output_word_offsets=_A )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) , 4 )
        self.assertTrue('''text''' in outputs )
        self.assertTrue('''word_offsets''' in outputs )
        self.assertTrue(isinstance(_A , _A ) )

        self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )

    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        UpperCAmelCase__ : Any = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
        UpperCAmelCase__ : Dict = self._get_dummy_logits()
        UpperCAmelCase__ : Dict = processor.batch_decode(_A , output_word_offsets=_A )

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) , 4 )
        self.assertTrue('''text''' in outputs )
        self.assertTrue('''word_offsets''' in outputs )
        self.assertTrue(isinstance(_A , _A ) )

        self.assertListEqual(
            [''' '''.join(self.get_from_offsets(_A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )

    @slow
    @require_torch
    @require_torchaudio
    def lowercase_ ( self : Optional[Any] ):
        '''simple docstring'''
        import torch

        UpperCAmelCase__ : Any = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_A )
        UpperCAmelCase__ : Dict = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16_000 ) )
        UpperCAmelCase__ : List[Any] = iter(_A )
        UpperCAmelCase__ : Optional[Any] = next(_A )
        UpperCAmelCase__ : Any = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
        UpperCAmelCase__ : int = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        UpperCAmelCase__ : int = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values

        with torch.no_grad():
            UpperCAmelCase__ : Dict = model(_A ).logits.cpu().numpy()

        UpperCAmelCase__ : int = processor.decode(logits[0] , output_word_offsets=_A )

        UpperCAmelCase__ : Any = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        UpperCAmelCase__ : Any = [
            {
                '''start_time''': d['''start_offset'''] * time_offset,
                '''end_time''':
d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] UpperCAmelCase__ : int = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , _A ) self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , output.text ) # output times UpperCAmelCase__ : List[Any] = torch.tensor(self.get_from_offsets(_A , '''start_time''' ) ) UpperCAmelCase__ : List[str] = torch.tensor(self.get_from_offsets(_A , '''end_time''' ) ) # fmt: off UpperCAmelCase__ : int = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] ) UpperCAmelCase__ : List[str] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] ) # fmt: on self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) ) self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) )
299
1
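The record above exercises pool-based CTC beam-search decoding. As a minimal usage sketch (using the real class name Wav2Vec2ProcessorWithLM rather than the obfuscated alias above; the checkpoint and the (2, 10, 16) dummy-logits shape are taken from the tests, and network access is assumed):

import numpy as np
from multiprocessing import get_context

from transformers import Wav2Vec2ProcessorWithLM

processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
logits = np.random.rand(2, 10, 16)  # (batch, time, vocab) dummy CTC logits

# As the tests note, the pool must be created *after* the processor so the LM
# is available to the pool's sub-processes.
with get_context("fork").Pool() as pool:
    decoded = processor.batch_decode(logits, pool)
print(decoded.text)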
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase_ ( __a ): lowerCAmelCase__ = (DDPMScheduler,) def lowercase_ ( self : Optional[Any] , **_A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = { '''num_train_timesteps''': 1_000, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**_A ) return config def lowercase_ ( self : Optional[int] ): '''simple docstring''' for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=_A ) def lowercase_ ( self : List[Any] ): '''simple docstring''' for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ): self.check_over_configs(beta_start=_A , beta_end=_A ) def lowercase_ ( self : Dict ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_A ) def lowercase_ ( self : Dict ): '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_A ) def lowercase_ ( self : str ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=_A ) def lowercase_ ( self : Dict ): '''simple docstring''' self.check_over_configs(thresholding=_A ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_A , prediction_type=_A , sample_max_value=_A , ) def lowercase_ ( self : Tuple ): '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_A ) def lowercase_ ( self : Dict ): '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=_A ) def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.scheduler_classes[0] UpperCAmelCase__ : List[Any] = self.get_scheduler_config() UpperCAmelCase__ : int = scheduler_class(**_A ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5 def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.scheduler_classes[0] UpperCAmelCase__ : Optional[int] = self.get_scheduler_config() UpperCAmelCase__ : Union[str, Any] = scheduler_class(**_A ) UpperCAmelCase__ : List[Any] = len(_A ) UpperCAmelCase__ : Union[str, Any] = self.dummy_model() UpperCAmelCase__ : List[Any] = self.dummy_sample_deter UpperCAmelCase__ : int = torch.manual_seed(0 ) for t in reversed(range(_A ) ): # 1. predict noise residual UpperCAmelCase__ : str = model(_A , _A ) # 2. 
predict previous mean of sample x_t-1 UpperCAmelCase__ : List[Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance UpperCAmelCase__ : Optional[Any] = pred_prev_sample UpperCAmelCase__ : Dict = torch.sum(torch.abs(_A ) ) UpperCAmelCase__ : str = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2 assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3 def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.scheduler_classes[0] UpperCAmelCase__ : Dict = self.get_scheduler_config(prediction_type='''v_prediction''' ) UpperCAmelCase__ : Tuple = scheduler_class(**_A ) UpperCAmelCase__ : List[Any] = len(_A ) UpperCAmelCase__ : int = self.dummy_model() UpperCAmelCase__ : Union[str, Any] = self.dummy_sample_deter UpperCAmelCase__ : str = torch.manual_seed(0 ) for t in reversed(range(_A ) ): # 1. predict noise residual UpperCAmelCase__ : Dict = model(_A , _A ) # 2. predict previous mean of sample x_t-1 UpperCAmelCase__ : str = scheduler.step(_A , _A , _A , generator=_A ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance UpperCAmelCase__ : List[str] = pred_prev_sample UpperCAmelCase__ : Tuple = torch.sum(torch.abs(_A ) ) UpperCAmelCase__ : Tuple = torch.mean(torch.abs(_A ) ) assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2 assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3 def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.scheduler_classes[0] UpperCAmelCase__ : int = self.get_scheduler_config() UpperCAmelCase__ : Union[str, Any] = scheduler_class(**_A ) UpperCAmelCase__ : Optional[Any] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_A ) UpperCAmelCase__ : Tuple = scheduler.timesteps for i, timestep in enumerate(_A ): if i == len(_A ) - 1: UpperCAmelCase__ : Union[str, Any] = -1 else: UpperCAmelCase__ : Optional[Any] = timesteps[i + 1] UpperCAmelCase__ : List[str] = scheduler.previous_timestep(_A ) UpperCAmelCase__ : Optional[Any] = prev_t.item() self.assertEqual(_A , _A ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : str = self.scheduler_classes[0] UpperCAmelCase__ : str = self.get_scheduler_config() UpperCAmelCase__ : Union[str, Any] = scheduler_class(**_A ) UpperCAmelCase__ : int = [100, 87, 50, 51, 0] with self.assertRaises(_A , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=_A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Dict = self.scheduler_classes[0] UpperCAmelCase__ : List[Any] = self.get_scheduler_config() UpperCAmelCase__ : Tuple = scheduler_class(**_A ) UpperCAmelCase__ : List[str] = [100, 87, 50, 1, 0] UpperCAmelCase__ : Union[str, Any] = len(_A ) with self.assertRaises(_A , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : int = self.scheduler_classes[0] UpperCAmelCase__ : str = self.get_scheduler_config() UpperCAmelCase__ : str = scheduler_class(**_A ) UpperCAmelCase__ : Optional[Any] = [scheduler.config.num_train_timesteps] with self.assertRaises( _A , msg='''`timesteps` must start before `self.config.train_timesteps`: 
{scheduler.config.num_train_timesteps}''' , ): scheduler.set_timesteps(timesteps=_A )
299
'''simple docstring'''

# Logistic Regression from scratch

# In[62]:

# In[63]:

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def a__ ( lowerCAmelCase__ ) -> List[Any]:
    return 1 / (1 + np.exp(-z ))


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
    return (-y * np.log(lowerCAmelCase__ ) - (1 - y) * np.log(1 - h )).mean()


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
    UpperCAmelCase__ : str = np.dot(lowerCAmelCase__ , lowerCAmelCase__ )
    return np.sum(y * scores - np.log(1 + np.exp(lowerCAmelCase__ ) ) )


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=7_00_00 ) -> List[Any]:
    UpperCAmelCase__ : Tuple = np.zeros(x.shape[1] )
    for iterations in range(lowerCAmelCase__ ):
        UpperCAmelCase__ : List[Any] = np.dot(lowerCAmelCase__ , lowerCAmelCase__ )
        UpperCAmelCase__ : List[str] = sigmoid_function(lowerCAmelCase__ )
        UpperCAmelCase__ : int = np.dot(x.T , h - y ) / y.size
        UpperCAmelCase__ : Optional[int] = theta - alpha * gradient  # updating the weights
        UpperCAmelCase__ : Dict = np.dot(lowerCAmelCase__ , lowerCAmelCase__ )
        UpperCAmelCase__ : int = sigmoid_function(lowerCAmelCase__ )
        UpperCAmelCase__ : Tuple = cost_function(lowerCAmelCase__ , lowerCAmelCase__ )
        if iterations % 1_00 == 0:
            print(F"""loss: {j} \t""" )  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    UpperCamelCase__ = datasets.load_iris()
    UpperCamelCase__ = iris.data[:, :2]
    UpperCamelCase__ = (iris.target != 0) * 1
    UpperCamelCase__ = 0.1
    UpperCamelCase__ = logistic_reg(alpha, x, y, max_iterations=7_0_0_0_0)
    print('''theta: ''', theta)  # printing the theta i.e our weights vector

    def a__ ( lowerCAmelCase__ ) -> Dict:
        return sigmoid_function(
            np.dot(lowerCAmelCase__ , lowerCAmelCase__ )
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(1_0, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
    ((UpperCamelCase__) , (UpperCamelCase__)) = (x[:, 0].min(), x[:, 0].max())
    ((UpperCamelCase__) , (UpperCamelCase__)) = (x[:, 1].min(), x[:, 1].max())
    ((UpperCamelCase__) , (UpperCamelCase__)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
    UpperCamelCase__ = np.c_[xxa.ravel(), xxa.ravel()]
    UpperCamelCase__ = predict_prob(grid).reshape(xxa.shape)
    plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
    plt.legend()
    plt.show()
299
1
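The scheduler record above steps through the full reverse diffusion loop. A stripped-down sketch of that loop, assuming a stand-in noise predictor in place of a trained UNet:

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
sample = torch.randn(1, 3, 32, 32)
model = lambda x, t: torch.zeros_like(x)  # placeholder noise predictor, not a real model

for t in reversed(range(scheduler.config.num_train_timesteps)):
    residual = model(sample, t)
    # step() applies the DDPM posterior update and returns the sample at t-1
    sample = scheduler.step(residual, t, sample).prev_sample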
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCamelCase__ = logging.get_logger(__name__)

UpperCamelCase__ = {
    '''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class lowerCamelCase_ ( __a ):
    lowerCAmelCase__ = 'pegasus'
    lowerCAmelCase__ = ['past_key_values']
    lowerCAmelCase__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__( self : Optional[Any] , _A : int=50_265 , _A : Union[str, Any]=1_024 , _A : Tuple=12 , _A : List[str]=4_096 , _A : str=16 , _A : str=12 , _A : Optional[int]=4_096 , _A : int=16 , _A : Union[str, Any]=0.0 , _A : Tuple=0.0 , _A : str=True , _A : List[Any]=True , _A : Union[str, Any]="gelu" , _A : Union[str, Any]=1_024 , _A : Dict=0.1 , _A : Union[str, Any]=0.0 , _A : Dict=0.0 , _A : List[Any]=0.0_2 , _A : Dict=0 , _A : str=False , _A : str=0 , _A : Tuple=1 , _A : Optional[Any]=1 , **_A : Union[str, Any] , ):
        '''simple docstring'''
        UpperCAmelCase__ : List[str] = vocab_size
        UpperCAmelCase__ : List[Any] = max_position_embeddings
        UpperCAmelCase__ : Optional[Any] = d_model
        UpperCAmelCase__ : List[str] = encoder_ffn_dim
        UpperCAmelCase__ : Dict = encoder_layers
        UpperCAmelCase__ : List[str] = encoder_attention_heads
        UpperCAmelCase__ : Optional[Any] = decoder_ffn_dim
        UpperCAmelCase__ : Tuple = decoder_layers
        UpperCAmelCase__ : Optional[int] = decoder_attention_heads
        UpperCAmelCase__ : List[str] = dropout
        UpperCAmelCase__ : int = attention_dropout
        UpperCAmelCase__ : List[Any] = activation_dropout
        UpperCAmelCase__ : Tuple = activation_function
        UpperCAmelCase__ : str = init_std
        UpperCAmelCase__ : Optional[Any] = encoder_layerdrop
        UpperCAmelCase__ : Dict = decoder_layerdrop
        UpperCAmelCase__ : int = use_cache
        UpperCAmelCase__ : Optional[int] = encoder_layers
        UpperCAmelCase__ : Optional[int] = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=_A ,
            eos_token_id=_A ,
            is_encoder_decoder=_A ,
            decoder_start_token_id=_A ,
            forced_eos_token_id=_A ,
            **_A ,
        )

    @property
    def lowercase_ ( self : Any ):
        '''simple docstring'''
        return self.encoder_attention_heads

    @property
    def lowercase_ ( self : int ):
        '''simple docstring'''
        return self.d_model
299
'''simple docstring''' from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'new-model' if is_tf_available(): class lowerCamelCase_ ( __a ): lowerCAmelCase__ = NewModelConfig @require_tf class lowerCamelCase_ ( unittest.TestCase ): @slow def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[str] = '''bert-base-cased''' UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Dict = TFAutoModel.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : str = '''bert-base-cased''' UpperCAmelCase__ : Any = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : List[str] = TFAutoModelForPreTraining.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : int ): '''simple docstring''' for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : str = TFAutoModelForCausalLM.from_pretrained(_A ) UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = TFAutoModelForCausalLM.from_pretrained(_A , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : List[Any] ): '''simple docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[Any] = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : Optional[Any] ): '''simple docstring''' for model_name in 
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_A ) UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_A , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : Optional[int] ): '''simple docstring''' for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(_A ) UpperCAmelCase__ , UpperCAmelCase__ : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(_A , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : Any ): '''simple docstring''' for model_name in ["bert-base-uncased"]: UpperCAmelCase__ : Any = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Any = TFAutoModelForSequenceClassification.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : Any ): '''simple docstring''' for model_name in ["bert-base-uncased"]: UpperCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Dict = TFAutoModelForQuestionAnswering.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow @require_tensorflow_probability def lowercase_ ( self : Optional[int] ): '''simple docstring''' for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: UpperCAmelCase__ : List[str] = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : List[str] = TFAutoModelForTableQuestionAnswering.from_pretrained(_A ) UpperCAmelCase__ , UpperCAmelCase__ : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained( _A , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A ) self.assertIsInstance(_A , _A ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=_A ) , 14_410 ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A ) self.assertIsInstance(_A , _A ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=_A ) , 14_410 ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : int = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Any = copy.deepcopy(model.config ) UpperCAmelCase__ : Tuple = ['''FunnelBaseModel'''] UpperCAmelCase__ : int = TFAutoModel.from_config(_A ) self.assertIsInstance(_A , _A ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_A ) UpperCAmelCase__ : str = TFAutoModel.from_pretrained(_A ) self.assertIsInstance(_A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' try: AutoConfig.register('''new-model''' , _A ) UpperCAmelCase__ : List[Any] = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, 
TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(_A ): auto_class.register(_A , _A ) auto_class.register(_A , _A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A ): auto_class.register(_A , _A ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCAmelCase__ : Tuple = BertModelTester(self ).get_config() UpperCAmelCase__ : str = NewModelConfig(**tiny_config.to_dict() ) UpperCAmelCase__ : str = auto_class.from_config(_A ) self.assertIsInstance(_A , _A ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_A ) UpperCAmelCase__ : str = auto_class.from_pretrained(_A ) self.assertIsInstance(_A , _A ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def lowercase_ ( self : str ): '''simple docstring''' with self.assertRaisesRegex( _A , '''bert-base is not a local folder and is not a valid model identifier''' ): UpperCAmelCase__ : Dict = TFAutoModel.from_pretrained('''bert-base''' ) def lowercase_ ( self : Tuple ): '''simple docstring''' with self.assertRaisesRegex( _A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): UpperCAmelCase__ : int = TFAutoModel.from_pretrained(_A , revision='''aaaaaa''' ) def lowercase_ ( self : Tuple ): '''simple docstring''' with self.assertRaisesRegex( _A , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ): UpperCAmelCase__ : List[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' with self.assertRaisesRegex(_A , '''Use `from_pt=True` to load this model''' ): UpperCAmelCase__ : int = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' ) def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[str] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: UpperCAmelCase__ : Union[str, Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint UpperCAmelCase__ : Optional[Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) with RequestCounter() as counter: UpperCAmelCase__ : List[Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
299
1
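The config record above defines Pegasus hyperparameters plus an attribute map that aliases generic names onto model-specific ones. A small check of that aliasing (using the real class name PegasusConfig; the values are arbitrary):

from transformers import PegasusConfig

config = PegasusConfig(d_model=512, encoder_attention_heads=8)
# the attribute map routes the generic names to d_model / encoder_attention_heads
assert config.hidden_size == 512
assert config.num_attention_heads == 8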
'''simple docstring'''

import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor


UpperCamelCase__ = logging.get_logger(__name__)


class lowerCamelCase_ ( __a ):
    def __init__( self : Optional[Any] , *_A : Tuple , **_A : Optional[Any] ):
        '''simple docstring'''
        warnings.warn(
            '''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use OwlViTImageProcessor instead.''' , _A , )
        super().__init__(*_A , **_A )
299
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class lowerCamelCase_ ( unittest.TestCase ): def __init__( self : List[str] , _A : List[Any] , _A : Union[str, Any]=7 , _A : List[str]=3 , _A : str=30 , _A : Tuple=400 , _A : Optional[int]=True , _A : List[str]=None , _A : int=True , _A : int=[0.5, 0.5, 0.5] , _A : Optional[int]=[0.5, 0.5, 0.5] , _A : List[Any]=True , _A : str=1 / 255 , _A : Tuple=True , ): '''simple docstring''' UpperCAmelCase__ : str = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333} UpperCAmelCase__ : Optional[Any] = parent UpperCAmelCase__ : Optional[Any] = batch_size UpperCAmelCase__ : List[str] = num_channels UpperCAmelCase__ : List[Any] = min_resolution UpperCAmelCase__ : List[str] = max_resolution UpperCAmelCase__ : Tuple = do_resize UpperCAmelCase__ : Union[str, Any] = size UpperCAmelCase__ : Dict = do_normalize UpperCAmelCase__ : Union[str, Any] = image_mean UpperCAmelCase__ : Optional[int] = image_std UpperCAmelCase__ : Dict = do_rescale UpperCAmelCase__ : Union[str, Any] = rescale_factor UpperCAmelCase__ : int = do_pad def lowercase_ ( self : Any ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def lowercase_ ( self : Any , _A : Union[str, Any] , _A : Union[str, Any]=False ): '''simple docstring''' if not batched: UpperCAmelCase__ : Optional[int] = image_inputs[0] if isinstance(_A , Image.Image ): UpperCAmelCase__ , UpperCAmelCase__ : str = image.size else: UpperCAmelCase__ , UpperCAmelCase__ : int = image.shape[1], image.shape[2] if w < h: UpperCAmelCase__ : Optional[Any] = int(self.size['''shortest_edge'''] * h / w ) UpperCAmelCase__ : List[Any] = self.size['''shortest_edge'''] elif w > h: UpperCAmelCase__ : int = self.size['''shortest_edge'''] UpperCAmelCase__ : Dict = int(self.size['''shortest_edge'''] * w / h ) else: UpperCAmelCase__ : List[str] = self.size['''shortest_edge'''] UpperCAmelCase__ : Dict = self.size['''shortest_edge'''] else: UpperCAmelCase__ : int = [] for image in image_inputs: UpperCAmelCase__ , UpperCAmelCase__ : str = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCAmelCase__ : Union[str, Any] = max(_A , key=lambda _A : item[0] )[0] UpperCAmelCase__ : Union[str, Any] = max(_A , key=lambda _A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = DetaImageProcessor if is_vision_available() else None def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = DetaImageProcessingTester(self ) @property def lowercase_ ( self : int ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''image_mean''' ) ) 
self.assertTrue(hasattr(_A , '''image_std''' ) ) self.assertTrue(hasattr(_A , '''do_normalize''' ) ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''do_rescale''' ) ) self.assertTrue(hasattr(_A , '''do_pad''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} ) self.assertEqual(image_processor.do_pad , _A ) def lowercase_ ( self : Dict ): '''simple docstring''' pass def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input UpperCAmelCase__ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ , UpperCAmelCase__ : str = self.image_processor_tester.get_expected_values(_A , batched=_A ) UpperCAmelCase__ : Union[str, Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input UpperCAmelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ : List[str] = image_processing(_A , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input UpperCAmelCase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, 
expected_width) , ) # Test batched UpperCAmelCase__ : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Any = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: UpperCAmelCase__ : str = json.loads(f.read() ) UpperCAmelCase__ : Tuple = {'''image_id''': 39_769, '''annotations''': target} # encode them UpperCAmelCase__ : Optional[int] = DetaImageProcessor() UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , return_tensors='''pt''' ) # verify pixel values UpperCAmelCase__ : Optional[int] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['''pixel_values'''].shape , _A ) UpperCAmelCase__ : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) ) # verify area UpperCAmelCase__ : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) ) # verify boxes UpperCAmelCase__ : int = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A ) UpperCAmelCase__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) ) # verify image_id UpperCAmelCase__ : str = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) ) # verify is_crowd UpperCAmelCase__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) ) # verify class_labels UpperCAmelCase__ : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) ) # verify orig_size UpperCAmelCase__ : int = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) ) # verify size UpperCAmelCase__ : int = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) ) @slow def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: UpperCAmelCase__ : int = json.loads(f.read() ) UpperCAmelCase__ : str = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target} UpperCAmelCase__ : Dict = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them UpperCAmelCase__ : Any = DetaImageProcessor(format='''coco_panoptic''' ) UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='''pt''' ) # verify pixel values UpperCAmelCase__ : str = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['''pixel_values'''].shape , _A ) UpperCAmelCase__ : Union[str, Any] = 
torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) ) # verify area UpperCAmelCase__ : Any = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) ) # verify boxes UpperCAmelCase__ : Dict = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A ) UpperCAmelCase__ : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) ) # verify image_id UpperCAmelCase__ : Optional[int] = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) ) # verify is_crowd UpperCAmelCase__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) ) # verify class_labels UpperCAmelCase__ : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) ) # verify masks UpperCAmelCase__ : Dict = 822_873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _A ) # verify orig_size UpperCAmelCase__ : str = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) ) # verify size UpperCAmelCase__ : Optional[Any] = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
299
1
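The OwlViT record above is an instance of a common deprecation shim: the retired feature-extractor class subclasses its image-processor replacement and only emits a warning. A generic sketch of the pattern, with placeholder class names:

import warnings


class NewImageProcessor:
    # stands in for the replacement class
    def __init__(self, *args, **kwargs):
        pass


class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)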
'''simple docstring''' import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class lowerCamelCase_ : def __init__( self : Optional[Any] , _A : int , _A : Union[str, Any]=13 , _A : Tuple=7 , _A : Dict=6 , _A : int=17 , _A : List[str]=23 , _A : Dict=11 , _A : Tuple=True , ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = parent UpperCAmelCase__ : Optional[int] = batch_size UpperCAmelCase__ : str = seq_length UpperCAmelCase__ : Dict = act_dim UpperCAmelCase__ : Tuple = state_dim UpperCAmelCase__ : Union[str, Any] = hidden_size UpperCAmelCase__ : int = max_length UpperCAmelCase__ : Any = is_training def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) UpperCAmelCase__ : Any = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) UpperCAmelCase__ : Dict = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase__ : List[str] = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase__ : Union[str, Any] = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_000 ) UpperCAmelCase__ : Any = random_attention_mask((self.batch_size, self.seq_length) ) UpperCAmelCase__ : Any = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def lowercase_ ( self : List[str] ): '''simple docstring''' return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def lowercase_ ( self : List[Any] , _A : List[Any] , _A : Optional[int] , _A : List[str] , _A : Tuple , _A : Optional[Any] , _A : List[str] , _A : int , ): '''simple docstring''' UpperCAmelCase__ : str = DecisionTransformerModel(config=_A ) model.to(_A ) model.eval() UpperCAmelCase__ : Tuple = model(_A , _A , _A , _A , _A , _A ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase__ : int = { '''states''': states, '''actions''': actions, '''rewards''': rewards, '''returns_to_go''': returns_to_go, '''timesteps''': timesteps, '''attention_mask''': attention_mask, } return config, inputs_dict @require_torch class lowerCamelCase_ ( __a , 
__a , __a , unittest.TestCase ): lowerCAmelCase__ = (DecisionTransformerModel,) if is_torch_available() else () lowerCAmelCase__ = () lowerCAmelCase__ = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids lowerCAmelCase__ = False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = DecisionTransformerModelTester(self ) UpperCAmelCase__ : Optional[Any] = ConfigTester(self , config_class=_A , hidden_size=37 ) def lowercase_ ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) @slow def lowercase_ ( self : List[Any] ): '''simple docstring''' for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Dict = DecisionTransformerModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Union[str, Any] = model_class(_A ) UpperCAmelCase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : List[str] = [*signature.parameters.keys()] UpperCAmelCase__ : Tuple = [ '''states''', '''actions''', '''rewards''', '''returns_to_go''', '''timesteps''', '''attention_mask''', ] self.assertListEqual(arg_names[: len(_A )] , _A ) @require_torch class lowerCamelCase_ ( unittest.TestCase ): @slow def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 2 # number of steps of autoregressive prediction we will perform UpperCAmelCase__ : Optional[int] = 10 # defined by the RL environment, may be normalized UpperCAmelCase__ : List[str] = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' ) UpperCAmelCase__ : List[str] = model.to(_A ) UpperCAmelCase__ : List[str] = model.config torch.manual_seed(0 ) UpperCAmelCase__ : Optional[Any] = torch.randn(1 , 1 , config.state_dim ).to(device=_A , dtype=torch.floataa ) # env.reset() UpperCAmelCase__ : Optional[int] = torch.tensor( [[0.2_4_2_7_9_3, -0.2_8_6_9_3_0_7_4, 0.8_7_4_2_6_1_3], [0.6_7_8_1_5_2_7_4, -0.0_8_1_0_1_0_8_5, -0.1_2_9_5_2_1_4_7]] , device=_A ) UpperCAmelCase__ : Dict = torch.tensor(_A , device=_A , dtype=torch.floataa ).reshape(1 , 1 , 1 ) UpperCAmelCase__ : Dict = state UpperCAmelCase__ : Optional[int] = torch.zeros(1 , 0 , config.act_dim , device=_A , dtype=torch.floataa ) UpperCAmelCase__ : Optional[int] = torch.zeros(1 , 0 , device=_A , dtype=torch.floataa ) UpperCAmelCase__ : Union[str, Any] = torch.tensor(0 , device=_A , dtype=torch.long ).reshape(1 , 1 ) for step in range(_A ): UpperCAmelCase__ : Union[str, Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=_A )] , dim=1 ) UpperCAmelCase__ : Any = torch.cat([rewards, torch.zeros(1 , 1 , 
device=_A )] , dim=1 ) UpperCAmelCase__ : Optional[int] = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device ) with torch.no_grad(): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = model( states=_A , actions=_A , rewards=_A , returns_to_go=_A , timesteps=_A , attention_mask=_A , return_dict=_A , ) self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = ( # env.step(action) torch.randn(1 , 1 , config.state_dim ).to(device=_A , dtype=torch.floataa ), 1.0, False, {}, ) UpperCAmelCase__ : Union[str, Any] = action_pred[0, -1] UpperCAmelCase__ : Optional[int] = torch.cat([states, state] , dim=1 ) UpperCAmelCase__ : Optional[int] = returns_to_go[0, -1] - reward UpperCAmelCase__ : List[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) UpperCAmelCase__ : Union[str, Any] = torch.cat( [timesteps, torch.ones((1, 1) , device=_A , dtype=torch.long ) * (step + 1)] , dim=1 )
299
'''simple docstring'''

from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def a__ ( lowerCAmelCase__ ) -> None:
    UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = analyze_text(lowerCAmelCase__ )
    UpperCAmelCase__ : List[Any] = list(''' ''' + ascii_lowercase )

    # what is our total sum of probabilities.
    UpperCAmelCase__ : str = sum(single_char_strings.values() )

    # one length string
    UpperCAmelCase__ : int = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            UpperCAmelCase__ : Optional[int] = single_char_strings[ch]
            UpperCAmelCase__ : int = my_str / all_sum
            my_fir_sum += prob * math.loga(lowerCAmelCase__ )  # entropy formula.

    # print entropy
    print(F"""{round(-1 * my_fir_sum ):.1f}""" )

    # two len string
    UpperCAmelCase__ : str = sum(two_char_strings.values() )
    UpperCAmelCase__ : Optional[Any] = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for cha in my_alphas:
            UpperCAmelCase__ : Optional[int] = cha + cha
            if sequence in two_char_strings:
                UpperCAmelCase__ : Dict = two_char_strings[sequence]
                UpperCAmelCase__ : Optional[int] = int(lowerCAmelCase__ ) / all_sum
                my_sec_sum += prob * math.loga(lowerCAmelCase__ )

    # print second entropy
    print(F"""{round(-1 * my_sec_sum ):.1f}""" )

    # print the difference between them
    print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )


def a__ ( lowerCAmelCase__ ) -> tuple[dict, dict]:
    UpperCAmelCase__ : Union[str, Any] = Counter()  # type: ignore
    UpperCAmelCase__ : Tuple = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(lowerCAmelCase__ ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def a__ ( ) -> Tuple:
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
299
1
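The entropy script above estimates first- and second-order character entropies over a text. A compact, self-contained version of the single-character case (standard Shannon entropy, written here with readable names):

import math
from collections import Counter


def shannon_entropy(text: str) -> float:
    counts = Counter(text)
    total = sum(counts.values())
    # H = -sum(p * log2(p)) over the observed characters
    return -sum((n / total) * math.log2(n / total) for n in counts.values())


print(f"{shannon_entropy('hello world'):.3f}")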
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


UpperCamelCase__ = {
    '''configuration_upernet''': ['''UperNetConfig'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ = [
        '''UperNetForSemanticSegmentation''',
        '''UperNetPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
299
'''simple docstring'''

from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


UpperCamelCase__ = logging.get_logger(__name__)

UpperCamelCase__ = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}

UpperCamelCase__ = {
    '''vocab_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
    },
    '''tokenizer_config_file''': {
        '''facebook/blenderbot_small-90M''': (
            '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
        )
    },
}

UpperCamelCase__ = {
    '''facebook/blenderbot_small-90M''': 5_1_2,
}


class lowerCamelCase_ ( __a ):
    lowerCAmelCase__ = VOCAB_FILES_NAMES
    lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase__ = BlenderbotSmallTokenizer

    def __init__( self : List[Any] , _A : List[Any]=None , _A : Optional[Any]=None , _A : Optional[int]="<|endoftext|>" , _A : List[str]="<|endoftext|>" , _A : List[str]="<|endoftext|>" , _A : Any=False , _A : Union[str, Any]=True , **_A : Optional[int] , ):
        '''simple docstring'''
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=_A , merges=_A , add_prefix_space=_A , trim_offsets=_A , ) ,
            bos_token=_A ,
            eos_token=_A ,
            unk_token=_A ,
            **_A ,
        )
        UpperCAmelCase__ : List[Any] = add_prefix_space

    def lowercase_ ( self : str , _A : Any , _A : Any=None ):
        '''simple docstring'''
        UpperCAmelCase__ : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def lowercase_ ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None ):
        '''simple docstring'''
        UpperCAmelCase__ : Optional[int] = [self.sep_token_id]
        UpperCAmelCase__ : Tuple = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
299
1
'''simple docstring''' import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) UpperCamelCase__ = '''hf-internal-testing/tiny-random-bert''' UpperCamelCase__ = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''') UpperCamelCase__ = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6''' class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = cached_file(_A , _A ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(_A ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(_A , _A ) ) ) with open(os.path.join(_A , '''refs''' , '''main''' ) ) as f: UpperCAmelCase__ : int = f.read() self.assertEqual(_A , os.path.join(_A , '''snapshots''' , _A , _A ) ) self.assertTrue(os.path.isfile(_A ) ) # File is cached at the same place the second time. UpperCAmelCase__ : str = cached_file(_A , _A ) self.assertEqual(_A , _A ) # Using a specific revision to test the full commit hash. UpperCAmelCase__ : Dict = cached_file(_A , _A , revision='''9b8c223''' ) self.assertEqual(_A , os.path.join(_A , '''snapshots''' , _A , _A ) ) def lowercase_ ( self : int ): '''simple docstring''' with self.assertRaisesRegex(_A , '''is not a valid model identifier''' ): UpperCAmelCase__ : int = cached_file('''tiny-random-bert''' , _A ) with self.assertRaisesRegex(_A , '''is not a valid git identifier''' ): UpperCAmelCase__ : str = cached_file(_A , _A , revision='''aaaa''' ) with self.assertRaisesRegex(_A , '''does not appear to have a file named''' ): UpperCAmelCase__ : str = cached_file(_A , '''conf''' ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' with self.assertRaisesRegex(_A , '''does not appear to have a file named''' ): UpperCAmelCase__ : Union[str, Any] = cached_file(_A , '''conf''' ) with open(os.path.join(_A , '''refs''' , '''main''' ) ) as f: UpperCAmelCase__ : Any = f.read() self.assertTrue(os.path.isfile(os.path.join(_A , '''.no_exist''' , _A , '''conf''' ) ) ) UpperCAmelCase__ : List[Any] = cached_file(_A , '''conf''' , _raise_exceptions_for_missing_entries=_A ) self.assertIsNone(_A ) UpperCAmelCase__ : Optional[int] = cached_file(_A , '''conf''' , local_files_only=_A , _raise_exceptions_for_missing_entries=_A ) self.assertIsNone(_A ) UpperCAmelCase__ : int = mock.Mock() UpperCAmelCase__ : Optional[Any] = 500 UpperCAmelCase__ : Any = {} UpperCAmelCase__ : Optional[int] = HTTPError UpperCAmelCase__ : Optional[Any] = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch('''requests.Session.request''' , return_value=_A ) as mock_head: UpperCAmelCase__ : List[str] = cached_file(_A , '''conf''' , _raise_exceptions_for_connection_errors=_A ) self.assertIsNone(_A ) # This check we did call the fake head request mock_head.assert_called() def lowercase_ ( self : Dict ): '''simple docstring''' self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _A ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _A ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _A ) ) def lowercase_ ( self : str ): '''simple docstring''' self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(_A , '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''' , _A ) # The function raises if the revision does not exist. with self.assertRaisesRegex(_A , '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''' , _A , revision='''ahaha''' ) UpperCAmelCase__ : Any = get_file_from_repo('''bert-base-cased''' , _A ) # The name is the cached name which is not very easy to test, so instead we load the content. UpperCAmelCase__ : Any = json.loads(open(_A , '''r''' ).read() ) self.assertEqual(config['''hidden_size'''] , 768 ) def lowercase_ ( self : List[Any] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase__ : Union[str, Any] = Path(_A ) / '''a.txt''' filename.touch() self.assertEqual(get_file_from_repo(_A , '''a.txt''' ) , str(_A ) ) self.assertIsNone(get_file_from_repo(_A , '''b.txt''' ) )
299
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = XLMRobertaTokenizer lowerCAmelCase__ = XLMRobertaTokenizerFast lowerCAmelCase__ = True lowerCAmelCase__ = True def lowercase_ ( self : Dict ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase__ : Union[str, Any] = XLMRobertaTokenizer(_A , keep_accents=_A ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = '''<pad>''' UpperCAmelCase__ : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(_A ) , 1_002 ) def lowercase_ ( self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_002 ) def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = XLMRobertaTokenizer(_A , keep_accents=_A ) UpperCAmelCase__ : int = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) UpperCAmelCase__ : Dict = tokenizer.convert_tokens_to_ids(_A ) self.assertListEqual( _A , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) UpperCAmelCase__ : Optional[int] = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def lowercase_ ( self : str ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return UpperCAmelCase__ : List[str] = (self.rust_tokenizer_class, 
'''hf-internal-testing/tiny-xlm-roberta''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A ) UpperCAmelCase__ : Optional[int] = self.tokenizer_class.from_pretrained(_A , **_A ) UpperCAmelCase__ : List[str] = tempfile.mkdtemp() UpperCAmelCase__ : Any = tokenizer_r.save_pretrained(_A ) UpperCAmelCase__ : Tuple = tokenizer_p.save_pretrained(_A ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) UpperCAmelCase__ : Optional[int] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(_A , _A ) # Checks everything loads correctly in the same way UpperCAmelCase__ : Any = tokenizer_r.from_pretrained(_A ) UpperCAmelCase__ : Dict = tokenizer_p.from_pretrained(_A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_A , _A ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(_A ) # Save tokenizer rust, legacy_format=True UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp() UpperCAmelCase__ : Union[str, Any] = tokenizer_r.save_pretrained(_A , legacy_format=_A ) UpperCAmelCase__ : List[str] = tokenizer_p.save_pretrained(_A ) # Checks it save with the same files self.assertSequenceEqual(_A , _A ) # Checks everything loads correctly in the same way UpperCAmelCase__ : List[str] = tokenizer_r.from_pretrained(_A ) UpperCAmelCase__ : List[str] = tokenizer_p.from_pretrained(_A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_A , _A ) ) shutil.rmtree(_A ) # Save tokenizer rust, legacy_format=False UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp() UpperCAmelCase__ : Dict = tokenizer_r.save_pretrained(_A , legacy_format=_A ) UpperCAmelCase__ : str = tokenizer_p.save_pretrained(_A ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way UpperCAmelCase__ : Union[str, Any] = tokenizer_r.from_pretrained(_A ) UpperCAmelCase__ : Optional[Any] = tokenizer_p.from_pretrained(_A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_A , _A ) ) shutil.rmtree(_A ) @cached_property def lowercase_ ( self : Optional[Any] ): '''simple docstring''' return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' ) def lowercase_ ( self : Any ): '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(_A , f.name ) UpperCAmelCase__ : int = XLMRobertaTokenizer(f.name , keep_accents=_A ) UpperCAmelCase__ : str = pickle.dumps(_A ) pickle.loads(_A ) def lowercase_ ( self : int ): '''simple docstring''' if not self.test_rust_tokenizer: return UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : Union[str, Any] = self.get_rust_tokenizer() UpperCAmelCase__ : Dict = '''I was born in 92000, and this is falsé.''' UpperCAmelCase__ : Dict = tokenizer.tokenize(_A ) UpperCAmelCase__ : List[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) UpperCAmelCase__ : int = 
tokenizer.encode(_A , add_special_tokens=_A ) UpperCAmelCase__ : Optional[Any] = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) UpperCAmelCase__ : Any = self.get_rust_tokenizer() UpperCAmelCase__ : List[Any] = tokenizer.encode(_A ) UpperCAmelCase__ : Union[str, Any] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) @slow def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : str = '''Hello World!''' UpperCAmelCase__ : Tuple = [0, 35_378, 6_661, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(_A , self.big_tokenizer.encode(_A ) ) @slow def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) UpperCAmelCase__ : Any = [ 0, 3_293, 83, 10, 4_552, 4_989, 7_986, 678, 10, 5_915, 111, 179_459, 124_850, 4, 6_044, 237, 12, 6, 5, 6, 4, 6_780, 705, 15, 1_388, 44, 378, 10_114, 711, 152, 20, 6, 5, 22_376, 642, 1_221, 15_190, 34_153, 450, 5_608, 959, 1_119, 57_702, 136, 186, 47, 1_098, 29_367, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6_044, 237, 6_284, 50_901, 528, 31, 90, 34, 927, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(_A , self.big_tokenizer.encode(_A ) ) @slow def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : int = {'''input_ids''': [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_A , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
299
1
'''simple docstring''' import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient UpperCamelCase__ = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN''']) def a__ ( lowerCAmelCase__ ) -> str: UpperCAmelCase__ : Optional[int] = test_results.split(''' ''' ) UpperCAmelCase__ : Tuple = 0 UpperCAmelCase__ : Tuple = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. UpperCAmelCase__ : List[Any] = expressions[-2] if '''=''' in expressions[-1] else expressions[-1] for i, expression in enumerate(lowerCAmelCase__ ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def a__ ( lowerCAmelCase__ ) -> Tuple: UpperCAmelCase__ : int = {} UpperCAmelCase__ : List[str] = None UpperCAmelCase__ : Optional[Any] = False for line in failures_short_lines.split('''\n''' ): if re.search(R'''_ \[doctest\]''' , lowerCAmelCase__ ): UpperCAmelCase__ : Tuple = True UpperCAmelCase__ : Optional[Any] = line.split(''' ''' )[2] elif in_error and not line.split(''' ''' )[0].isdigit(): UpperCAmelCase__ : List[Any] = line UpperCAmelCase__ : Union[str, Any] = False return failures class lowerCamelCase_ : def __init__( self : Tuple , _A : str , _A : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = title UpperCAmelCase__ : Optional[int] = doc_test_results['''time_spent'''].split(''',''' )[0] UpperCAmelCase__ : Optional[int] = doc_test_results['''success'''] UpperCAmelCase__ : Optional[Any] = doc_test_results['''failures'''] UpperCAmelCase__ : Union[str, Any] = self.n_success + self.n_failures # Failures and success of the modeling tests UpperCAmelCase__ : Tuple = doc_test_results @property def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = [self._time_spent] UpperCAmelCase__ : List[str] = 0 for time in time_spent: UpperCAmelCase__ : Tuple = time.split(''':''' ) # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(_A ) == 1: UpperCAmelCase__ : str = [0, 0, time_parts[0]] UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] ) total_secs += hours * 3_600 + minutes * 60 + seconds UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = total_secs // 3_600, (total_secs % 3_600) // 60, total_secs % 60 return f"""{int(_A )}h{int(_A )}m{int(_A )}s""" @property def lowercase_ ( self : Any ): '''simple docstring''' return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def lowercase_ ( self : List[Any] ): '''simple docstring''' return { "type": "section", "text": { "type": "plain_text", "text": f"""🌞 There were no failures: all {self.n_tests} tests passed. 
The suite ran in {self.time}.""", "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""", }, } @property def lowercase_ ( self : Tuple ): '''simple docstring''' return { "type": "section", "text": { "type": "plain_text", "text": ( f"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in""" f""" {self.time}.""" ), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""", }, } @property def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = 40 UpperCAmelCase__ : Dict = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_A , _A )} UpperCAmelCase__ : Dict = '''''' for category, failures in category_failures.items(): if len(_A ) == 0: continue if report != "": report += "\n\n" report += f"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n" report += "`" report += "`\n`".join(_A ) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": f"""The following examples had failures:\n\n\n{report}\n""", }, } @property def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = [self.header] if self.n_failures > 0: blocks.append(self.failures ) if self.n_failures > 0: blocks.extend([self.category_failures] ) if self.n_failures == 0: blocks.append(self.no_failures ) return json.dumps(_A ) @staticmethod def lowercase_ ( ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = [ { '''type''': '''section''', '''text''': { '''type''': '''plain_text''', '''text''': '''There was an issue running the tests.''', }, '''accessory''': { '''type''': '''button''', '''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True}, '''url''': f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""", }, } ] print('''Sending the following payload''' ) print(json.dumps({'''blocks''': json.loads(_A )} ) ) client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=_A , ) def lowercase_ ( self : int ): '''simple docstring''' print('''Sending the following payload''' ) print(json.dumps({'''blocks''': json.loads(self.payload )} ) ) UpperCAmelCase__ : Optional[Any] = f"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else '''All tests passed.''' UpperCAmelCase__ : Any = client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=_A , ) def lowercase_ ( self : Union[str, Any] , _A : Union[str, Any] , _A : Tuple , _A : List[Any] , _A : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = '''''' for key, value in failures.items(): UpperCAmelCase__ : str = value[:200] + ''' [Truncated]''' if len(_A ) > 250 else value failures_text += f"""*{key}*\n_{value}_\n\n""" UpperCAmelCase__ : int = job_name UpperCAmelCase__ : int = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}} if job_link is not None: UpperCAmelCase__ : List[str] = { '''type''': '''button''', '''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': 
True}, '''url''': job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def lowercase_ ( self : List[str] ): '''simple docstring''' if self.thread_ts is None: raise ValueError('''Can only post reply if a post has been made.''' ) UpperCAmelCase__ : Optional[int] = self.doc_test_results.pop('''job_link''' ) self.doc_test_results.pop('''failures''' ) self.doc_test_results.pop('''success''' ) self.doc_test_results.pop('''time_spent''' ) UpperCAmelCase__ : Tuple = sorted(self.doc_test_results.items() , key=lambda _A : t[0] ) for job, job_result in sorted_dict: if len(job_result['''failures'''] ): UpperCAmelCase__ : Any = f"""*Num failures* :{len(job_result["failed"] )} \n""" UpperCAmelCase__ : Tuple = job_result['''failures'''] UpperCAmelCase__ : List[Any] = self.get_reply_blocks(_A , _A , _A , text=_A ) print('''Sending the following reply''' ) print(json.dumps({'''blocks''': blocks} ) ) client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=f"""Results for {job}""" , blocks=_A , thread_ts=self.thread_ts['''ts'''] , ) time.sleep(1 ) def a__ ( ) -> int: UpperCAmelCase__ : Any = os.environ['''GITHUB_RUN_ID'''] UpperCAmelCase__ : List[str] = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100""" UpperCAmelCase__ : Optional[int] = requests.get(lowerCAmelCase__ ).json() UpperCAmelCase__ : List[Any] = {} try: jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) UpperCAmelCase__ : int = math.ceil((result['''total_count'''] - 1_00) / 1_00 ) for i in range(lowerCAmelCase__ ): UpperCAmelCase__ : Optional[Any] = requests.get(url + F"""&page={i + 2}""" ).json() jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) return jobs except Exception as e: print('''Unknown error, could not fetch links.''' , lowerCAmelCase__ ) return {} def a__ ( lowerCAmelCase__ ) -> List[Any]: UpperCAmelCase__ : Union[str, Any] = {} if os.path.exists(lowerCAmelCase__ ): UpperCAmelCase__ : List[str] = os.listdir(lowerCAmelCase__ ) for file in files: try: with open(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , encoding='''utf-8''' ) as f: UpperCAmelCase__ : int = f.read() except UnicodeDecodeError as e: raise ValueError(F"""Could not open {os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )}.""" ) from e return _artifact def a__ ( ) -> Optional[Any]: class lowerCamelCase_ : def __init__( self : Optional[int] , _A : str ): '''simple docstring''' UpperCAmelCase__ : str = name UpperCAmelCase__ : List[str] = [] def __str__( self : Tuple ): '''simple docstring''' return self.name def lowercase_ ( self : Union[str, Any] , _A : str ): '''simple docstring''' self.paths.append({'''name''': self.name, '''path''': path} ) UpperCAmelCase__ : Dict[str, Artifact] = {} UpperCAmelCase__ : int = filter(os.path.isdir , os.listdir() ) for directory in directories: UpperCAmelCase__ : int = directory if artifact_name not in _available_artifacts: UpperCAmelCase__ : Dict = Artifact(lowerCAmelCase__ ) _available_artifacts[artifact_name].add_path(lowerCAmelCase__ ) return _available_artifacts if __name__ == "__main__": UpperCamelCase__ = get_job_links() UpperCamelCase__ = retrieve_available_artifacts() UpperCamelCase__ = collections.OrderedDict( [ ('''*.py''', '''API Examples'''), ('''*.md''', '''MD Examples'''), ] ) # This dict will contain all the information relative to each 
doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' UpperCamelCase__ = { v: { '''failed''': [], '''failures''': {}, } for v in docs.values() } # Link to the GitHub Action job UpperCamelCase__ = github_actions_job_links.get('''run_doctests''') UpperCamelCase__ = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0] UpperCamelCase__ = retrieve_artifact(artifact_path['''name''']) if "stats" in artifact: UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = handle_test_results(artifact['''stats''']) UpperCamelCase__ = failed UpperCamelCase__ = success UpperCamelCase__ = time_spent[1:-1] + ''', ''' UpperCamelCase__ = extract_first_line_failure(artifact['''failures_short''']) for line in artifact["summary_short"].split('''\n'''): if re.search('''FAILED''', line): UpperCamelCase__ = line.replace('''FAILED ''', '''''') UpperCamelCase__ = line.split()[0].replace('''\n''', '''''') if "::" in line: UpperCamelCase__ , UpperCamelCase__ = line.split('''::''') else: UpperCamelCase__ , UpperCamelCase__ = line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): UpperCamelCase__ = docs[file_regex] doc_test_results[category]["failed"].append(test) UpperCamelCase__ = all_failures[test] if test in all_failures else '''N/A''' UpperCamelCase__ = failure break UpperCamelCase__ = Message('''🤗 Results of the doc tests.''', doc_test_results) message.post() message.post_reply()
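The time-accounting property in the Slack-report class above normalizes both `hh:mm:ss` stamps and bare fractional seconds into a total number of seconds. A standalone sketch of that parsing step (names illustrative):

def to_seconds(stamp: str) -> float:
    parts = stamp.split(":")
    if len(parts) == 1:  # bare seconds such as "4.5"
        parts = ["0", "0", parts[0]]
    hours, minutes, seconds = int(parts[0]), int(parts[1]), float(parts[2])
    return hours * 3600 + minutes * 60 + seconds

assert to_seconds("01:02:03") == 3723.0
assert to_seconds("4.5") == 4.5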
299
'''simple docstring'''
from __future__ import annotations

import math


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if not scores:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1 , node_index * 2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) ,
            minimax(depth + 1 , node_index * 2 + 1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) ,
        )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) ,
            minimax(depth + 1 , node_index * 2 + 1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) ,
        )
    )


def a__ ( ) -> None:
    UpperCAmelCase__ : Union[str, Any] = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
    UpperCAmelCase__ : Optional[Any] = math.log(len(lowerCAmelCase__ ) , 2 )
    print(F"""Optimal value : {minimax(0 , 0 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )}""" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
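To make the recursion above concrete, here is a de-obfuscated sketch with conventional parameter names, evaluated on a four-leaf complete binary tree (within each depth level, the children of node `i` live at `2i` and `2i + 1`):

import math

def minimax(depth, node_index, is_max, scores, height):
    if depth == height:
        return scores[node_index]
    left = minimax(depth + 1, node_index * 2, not is_max, scores, height)
    right = minimax(depth + 1, node_index * 2 + 1, not is_max, scores, height)
    return max(left, right) if is_max else min(left, right)

scores = [3, 5, 2, 9]                 # leaves of a complete binary tree
height = int(math.log2(len(scores)))  # = 2
assert minimax(0, 0, True, scores, height) == 3  # max(min(3, 5), min(2, 9))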
299
1
'''simple docstring'''
import functools
from typing import Any


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> bool:
    # Validation
    if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or len(lowerCAmelCase__ ) == 0:
        raise ValueError('''the string should be not empty string''' )

    if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or not all(
        isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) > 0 for item in words
    ):
        raise ValueError('''the words should be a list of non-empty strings''' )

    # Build trie
    UpperCAmelCase__ : dict[str, Any] = {}
    UpperCAmelCase__ : Dict = '''WORD_KEEPER'''

    for word in words:
        UpperCAmelCase__ : Tuple = trie
        for c in word:
            if c not in trie_node:
                UpperCAmelCase__ : str = {}
            UpperCAmelCase__ : Optional[Any] = trie_node[c]

        UpperCAmelCase__ : int = True

    UpperCAmelCase__ : str = len(lowerCAmelCase__ )

    # Dynamic programming method
    @functools.cache
    def is_breakable(lowerCAmelCase__ ) -> bool:
        if index == len_string:
            return True

        UpperCAmelCase__ : Optional[int] = trie
        for i in range(lowerCAmelCase__ , lowerCAmelCase__ ):
            UpperCAmelCase__ : Tuple = trie_node.get(string[i] , lowerCAmelCase__ )

            if trie_node is None:
                return False

            if trie_node.get(lowerCAmelCase__ , lowerCAmelCase__ ) and is_breakable(i + 1 ):
                return True

        return False

    return is_breakable(0 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
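The row above pairs a trie with memoised top-down dynamic programming. The same word-break decision problem can be sketched with `functools.cache` alone (no trie), which is enough to see the prefix recursion; this is an illustrative alternative, not the row's own method:

import functools

@functools.cache
def can_break(s: str, words: frozenset) -> bool:
    # try every dictionary word as a prefix, then recurse on the remainder
    if not s:
        return True
    return any(s.startswith(w) and can_break(s[len(w):], words) for w in words)

assert can_break("applepenapple", frozenset({"apple", "pen"}))
assert not can_break("catsandog", frozenset({"cats", "dog", "sand", "and", "cat"}))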
299
'''simple docstring'''
class lowerCamelCase_ :
    def __init__( self : Union[str, Any] , _A : int ):
        '''simple docstring'''
        UpperCAmelCase__ : str = n
        UpperCAmelCase__ : Union[str, Any] = [None] * self.n
        UpperCAmelCase__ : Tuple = 0  # index of the first element
        UpperCAmelCase__ : int = 0
        UpperCAmelCase__ : int = 0

    def __len__( self : Optional[Any] ):
        '''simple docstring'''
        return self.size

    def lowercase_ ( self : Dict ):
        '''simple docstring'''
        return self.size == 0

    def lowercase_ ( self : List[str] ):
        '''simple docstring'''
        return False if self.is_empty() else self.array[self.front]

    def lowercase_ ( self : List[Any] , _A : int ):
        '''simple docstring'''
        if self.size >= self.n:
            raise Exception('''QUEUE IS FULL''' )

        UpperCAmelCase__ : str = data
        UpperCAmelCase__ : Optional[Any] = (self.rear + 1) % self.n
        self.size += 1
        return self

    def lowercase_ ( self : List[Any] ):
        '''simple docstring'''
        if self.size == 0:
            raise Exception('''UNDERFLOW''' )

        UpperCAmelCase__ : Any = self.array[self.front]
        UpperCAmelCase__ : List[Any] = None
        UpperCAmelCase__ : Tuple = (self.front + 1) % self.n
        self.size -= 1
        return temp
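A quick trace of the modular index arithmetic the circular queue above relies on: after one dequeue, the next enqueue wraps `rear` back into the freed slot. This is a minimal sketch with plain variables instead of the class:

n = 3
buf = [None] * n
front = rear = size = 0

for x in (1, 2, 3):          # enqueue until full
    buf[rear] = x
    rear = (rear + 1) % n
    size += 1

front = (front + 1) % n      # dequeue: hands back buf[0] == 1
size -= 1

buf[rear] = 4                # rear has wrapped to 0, so 4 reuses the freed slot
rear = (rear + 1) % n
size += 1

assert buf == [4, 2, 3] and front == 1 and rear == 1 and size == 3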
299
1
'''simple docstring'''
from __future__ import annotations


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> tuple[float, list[float]]:
    UpperCAmelCase__ : Optional[Any] = list(range(len(lowerCAmelCase__ ) ) )
    UpperCAmelCase__ : Optional[Any] = [v / w for v, w in zip(lowerCAmelCase__ , lowerCAmelCase__ )]
    index.sort(key=lambda i : ratio[i] , reverse=lowerCAmelCase__ )

    UpperCAmelCase__ : float = 0
    UpperCAmelCase__ : list[float] = [0] * len(lowerCAmelCase__ )

    for i in index:
        if weight[i] <= capacity:
            UpperCAmelCase__ : List[str] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            UpperCAmelCase__ : Tuple = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
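A de-obfuscated reference version of the greedy ratio rule above, useful for checking results; the function name and the test instance are illustrative:

def fractional_knapsack(value, weight, capacity):
    # take items whole in decreasing value/weight order, then a fraction of the next
    order = sorted(range(len(value)), key=lambda i: value[i] / weight[i], reverse=True)
    total = 0.0
    for i in order:
        if weight[i] <= capacity:
            total += value[i]
            capacity -= weight[i]
        else:
            total += value[i] * capacity / weight[i]
            break
    return total

assert fractional_knapsack([60, 100, 120], [10, 20, 30], 50) == 240.0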
299
'''simple docstring'''
def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
    UpperCAmelCase__ : Optional[Any] = len(lowerCAmelCase__ )
    for i in range(length - 1 ):
        UpperCAmelCase__ : Optional[Any] = i
        for k in range(i + 1 , lowerCAmelCase__ ):
            if collection[k] < collection[least]:
                UpperCAmelCase__ : Dict = k
        if least != i:
            UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    UpperCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
    UpperCamelCase__ = [int(item) for item in user_input.split(''',''')]
    print(selection_sort(unsorted))
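Note that the `__main__` block above calls `selection_sort`, so the function defined as `a__` is meant to carry that name. A minimal readable equivalent plus a smoke test:

def selection_sort(collection):
    # repeatedly swap the minimum of the unsorted suffix into position i
    for i in range(len(collection) - 1):
        least = min(range(i, len(collection)), key=collection.__getitem__)
        collection[i], collection[least] = collection[least], collection[i]
    return collection

assert selection_sort([5, 2, 9, 1]) == [1, 2, 5, 9]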
299
1
'''simple docstring'''
from __future__ import annotations

import math


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )

    if len(lowerCAmelCase__ ) == 0:
        raise ValueError('''Scores cannot be empty''' )

    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) ,
            minimax(depth + 1 , node_index * 2 + 1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) ,
        )

    return min(
        minimax(depth + 1 , node_index * 2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) ,
        minimax(depth + 1 , node_index * 2 + 1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) ,
    )


def a__ ( ) -> None:
    UpperCAmelCase__ : Optional[int] = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
    UpperCAmelCase__ : Any = math.log(len(lowerCAmelCase__ ) , 2 )
    print('''Optimal value : ''' , end='''''' )
    print(minimax(0 , 0 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
299
'''simple docstring''' from collections.abc import Iterable from typing import Any class lowerCamelCase_ : def __init__( self : List[Any] , _A : int | None = None ): '''simple docstring''' UpperCAmelCase__ : List[Any] = value UpperCAmelCase__ : Node | None = None # Added in order to delete a node easier UpperCAmelCase__ : Node | None = None UpperCAmelCase__ : Node | None = None def __repr__( self : Optional[Any] ): '''simple docstring''' from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 ) class lowerCamelCase_ : def __init__( self : Optional[Any] , _A : Node | None = None ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = root def __str__( self : Union[str, Any] ): '''simple docstring''' return str(self.root ) def lowercase_ ( self : str , _A : Node , _A : Node | None ): '''simple docstring''' if new_children is not None: # reset its kids UpperCAmelCase__ : Dict = node.parent if node.parent is not None: # reset its parent if self.is_right(_A ): # If it is the right children UpperCAmelCase__ : str = new_children else: UpperCAmelCase__ : Optional[int] = new_children else: UpperCAmelCase__ : Union[str, Any] = new_children def lowercase_ ( self : Union[str, Any] , _A : Node ): '''simple docstring''' if node.parent and node.parent.right: return node == node.parent.right return False def lowercase_ ( self : int ): '''simple docstring''' return self.root is None def lowercase_ ( self : List[str] , _A : Any ): '''simple docstring''' UpperCAmelCase__ : Dict = Node(_A ) # create a new Node if self.empty(): # if Tree is empty UpperCAmelCase__ : List[Any] = new_node # set its root else: # Tree is not empty UpperCAmelCase__ : str = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: UpperCAmelCase__ : Optional[Any] = new_node # We insert the new node in a leaf break else: UpperCAmelCase__ : Any = parent_node.left else: if parent_node.right is None: UpperCAmelCase__ : str = new_node break else: UpperCAmelCase__ : List[str] = parent_node.right UpperCAmelCase__ : Tuple = parent_node def lowercase_ ( self : Optional[Any] , *_A : Tuple ): '''simple docstring''' for value in values: self.__insert(_A ) def lowercase_ ( self : Union[str, Any] , _A : int ): '''simple docstring''' if self.empty(): raise IndexError('''Warning: Tree is empty! 
please use another.''' ) else: UpperCAmelCase__ : List[Any] = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: UpperCAmelCase__ : str = node.left if value < node.value else node.right return node def lowercase_ ( self : List[Any] , _A : Node | None = None ): '''simple docstring''' if node is None: if self.root is None: return None UpperCAmelCase__ : int = self.root if not self.empty(): while node.right is not None: UpperCAmelCase__ : Tuple = node.right return node def lowercase_ ( self : List[Any] , _A : Node | None = None ): '''simple docstring''' if node is None: UpperCAmelCase__ : Optional[int] = self.root if self.root is None: return None if not self.empty(): UpperCAmelCase__ : Optional[int] = self.root while node.left is not None: UpperCAmelCase__ : Tuple = node.left return node def lowercase_ ( self : List[Any] , _A : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.search(_A ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(_A , _A ) elif node.left is None: # Has only right children self.__reassign_nodes(_A , node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(_A , node.left ) else: UpperCAmelCase__ : Union[str, Any] = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore UpperCAmelCase__ : Optional[Any] = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def lowercase_ ( self : List[str] , _A : Node | None ): '''simple docstring''' if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def lowercase_ ( self : str , _A : Any=None ): '''simple docstring''' if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def lowercase_ ( self : Dict , _A : list , _A : Node | None ): '''simple docstring''' if node: self.inorder(_A , node.left ) arr.append(node.value ) self.inorder(_A , node.right ) def lowercase_ ( self : Optional[Any] , _A : int , _A : Node ): '''simple docstring''' UpperCAmelCase__ : list[int] = [] self.inorder(_A , _A ) # append all values to list using inorder traversal return arr[k - 1] def a__ ( lowerCAmelCase__ ) -> list[Node]: UpperCAmelCase__ : Union[str, Any] = [] if curr_node is not None: UpperCAmelCase__ : str = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def a__ ( ) -> None: UpperCAmelCase__ : List[Any] = (8, 3, 6, 1, 10, 14, 13, 4, 7) UpperCAmelCase__ : str = BinarySearchTree() for i in testlist: t.insert(lowerCAmelCase__ ) # Prints all the elements of the list in order traversal print(lowerCAmelCase__ ) if t.search(6 ) is not None: print('''The value 6 exists''' ) else: print('''The value 6 doesn\'t exist''' ) if t.search(-1 ) is not None: print('''The value -1 exists''' ) else: print('''The value -1 doesn\'t exist''' ) if not t.empty(): print('''Max Value: ''' , t.get_max().value ) # type: ignore print('''Min Value: ''' , t.get_min().value ) # type: ignore for i in testlist: t.remove(lowerCAmelCase__ ) print(lowerCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
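The `kth_smallest`-style helper in the tree above works because an inorder traversal of a binary search tree visits values in ascending order. A standalone check of that property, with nodes modelled as `(left, value, right)` tuples (illustrative):

def inorder(node):
    if node is None:
        return []
    left, value, right = node
    return inorder(left) + [value] + inorder(right)

tree = ((None, 1, None), 3, ((None, 4, None), 6, (None, 7, None)))
assert inorder(tree) == [1, 3, 4, 6, 7]  # so the k-th smallest is inorder(tree)[k - 1]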
299
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) UpperCamelCase__ = { '''configuration_blip''': [ '''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BlipConfig''', '''BlipTextConfig''', '''BlipVisionConfig''', ], '''processing_blip''': ['''BlipProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = ['''BlipImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ '''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BlipModel''', '''BlipPreTrainedModel''', '''BlipForConditionalGeneration''', '''BlipForQuestionAnswering''', '''BlipVisionModel''', '''BlipTextModel''', '''BlipForImageTextRetrieval''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ '''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFBlipModel''', '''TFBlipPreTrainedModel''', '''TFBlipForConditionalGeneration''', '''TFBlipForQuestionAnswering''', '''TFBlipVisionModel''', '''TFBlipTextModel''', '''TFBlipForImageTextRetrieval''', ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
299
'''simple docstring''' import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger('''transformers.models.speecht5''') UpperCamelCase__ = { '''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''', '''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''', '''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''', '''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''', } UpperCamelCase__ = { '''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''', '''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''', } UpperCamelCase__ = { '''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''', '''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''', '''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''', '''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''', '''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''', } UpperCamelCase__ = { '''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''', '''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''', '''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''', '''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''', '''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''', '''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''', '''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''', '''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''', '''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''', '''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''', '''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''', '''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''', } UpperCamelCase__ = { '''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''', } UpperCamelCase__ = { '''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''', } UpperCamelCase__ = { '''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''', '''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''', '''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''', '''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''', '''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''', '''encoder.layers.*.fc1''': 
'''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''', '''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''', '''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''', '''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''', } UpperCamelCase__ = { '''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''', '''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''', '''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''', '''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''', '''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''', '''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''', '''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''', '''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''', '''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''', '''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''', '''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''', '''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''', '''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''', } UpperCamelCase__ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } UpperCamelCase__ = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCamelCase__ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCamelCase__ = [] UpperCamelCase__ = [ '''encoder.version''', '''encoder.layers.*.norm_k.weight''', '''encoder.layers.*.norm_k.bias''', '''decoder.version''', '''decoder.layers.*.norm_k.weight''', '''decoder.layers.*.norm_k.bias''', '''decoder.pos_emb.pe_k''', '''speech_encoder_prenet.embed_positions._float_tensor''', '''text_decoder_prenet.embed_positions._float_tensor''', ] UpperCamelCase__ = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''speech_decoder_prenet.*''', '''speech_decoder_postnet.*''', ] UpperCamelCase__ = IGNORE_KEYS + [ '''encoder.proj''', '''speech_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] UpperCamelCase__ = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: for attribute in key.split('''.''' ): UpperCAmelCase__ : Optional[int] = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) if weight_type is not None: UpperCAmelCase__ : List[str] = getattr(lowerCAmelCase__ , 
lowerCAmelCase__ ).shape else: UpperCAmelCase__ : Any = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCAmelCase__ : Union[str, Any] = value elif weight_type == "weight_g": UpperCAmelCase__ : Tuple = value elif weight_type == "weight_v": UpperCAmelCase__ : List[Any] = value elif weight_type == "bias": UpperCAmelCase__ : int = value elif weight_type == "running_mean": UpperCAmelCase__ : int = value elif weight_type == "running_var": UpperCAmelCase__ : Union[str, Any] = value elif weight_type == "num_batches_tracked": UpperCAmelCase__ : List[Any] = value else: UpperCAmelCase__ : Union[str, Any] = value logger.info(F"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]: for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." in key: UpperCAmelCase__ , UpperCAmelCase__ : int = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]: UpperCAmelCase__ : int = [] if task == "s2t": UpperCAmelCase__ : Optional[Any] = hf_model.speechta.encoder.prenet.feature_encoder UpperCAmelCase__ : List[Any] = MAPPING_S2T UpperCAmelCase__ : int = IGNORE_KEYS_S2T elif task == "t2s": UpperCAmelCase__ : List[str] = None UpperCAmelCase__ : Tuple = MAPPING_T2S UpperCAmelCase__ : Union[str, Any] = IGNORE_KEYS_T2S elif task == "s2s": UpperCAmelCase__ : Optional[int] = hf_model.speechta.encoder.prenet.feature_encoder UpperCAmelCase__ : Tuple = MAPPING_S2S UpperCAmelCase__ : int = IGNORE_KEYS_S2S else: raise ValueError(F"""Unsupported task: {task}""" ) for name, value in fairseq_dict.items(): if should_ignore(lowerCAmelCase__ , lowerCAmelCase__ ): logger.info(F"""{name} was ignored""" ) continue UpperCAmelCase__ : List[Any] = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , ) UpperCAmelCase__ : Tuple = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." 
+ mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = key.split('''.*.''' ) if prefix in name and suffix in name: UpperCAmelCase__ : List[str] = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: UpperCAmelCase__ : Optional[int] = True if "*" in mapped_key: UpperCAmelCase__ : Any = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2] UpperCAmelCase__ : Union[str, Any] = mapped_key.replace('''*''' , lowerCAmelCase__ ) if "weight_g" in name: UpperCAmelCase__ : Dict = '''weight_g''' elif "weight_v" in name: UpperCAmelCase__ : Union[str, Any] = '''weight_v''' elif "bias" in name: UpperCAmelCase__ : Optional[int] = '''bias''' elif "weight" in name: UpperCAmelCase__ : Optional[int] = '''weight''' elif "running_mean" in name: UpperCAmelCase__ : Optional[int] = '''running_mean''' elif "running_var" in name: UpperCAmelCase__ : List[Any] = '''running_var''' elif "num_batches_tracked" in name: UpperCAmelCase__ : Optional[Any] = '''num_batches_tracked''' else: UpperCAmelCase__ : Union[str, Any] = None set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) continue if not is_used: unused_weights.append(lowerCAmelCase__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str: UpperCAmelCase__ : Optional[int] = full_name.split('''conv_layers.''' )[-1] UpperCAmelCase__ : Optional[Any] = name.split('''.''' ) UpperCAmelCase__ : Any = int(items[0] ) UpperCAmelCase__ : Optional[int] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) UpperCAmelCase__ : Any = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) UpperCAmelCase__ : int = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) UpperCAmelCase__ : List[str] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) UpperCAmelCase__ : Union[str, Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowerCAmelCase__ ) @torch.no_grad() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , 
lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> Any: if config_path is not None: UpperCAmelCase__ : Optional[Any] = SpeechTaConfig.from_pretrained(lowerCAmelCase__ ) else: UpperCAmelCase__ : str = SpeechTaConfig() if task == "s2t": UpperCAmelCase__ : str = config.max_text_positions UpperCAmelCase__ : List[str] = SpeechTaForSpeechToText(lowerCAmelCase__ ) elif task == "t2s": UpperCAmelCase__ : Tuple = 18_76 UpperCAmelCase__ : int = 6_00 UpperCAmelCase__ : Union[str, Any] = config.max_speech_positions UpperCAmelCase__ : Optional[Any] = SpeechTaForTextToSpeech(lowerCAmelCase__ ) elif task == "s2s": UpperCAmelCase__ : Tuple = 18_76 UpperCAmelCase__ : Optional[Any] = config.max_speech_positions UpperCAmelCase__ : Dict = SpeechTaForSpeechToSpeech(lowerCAmelCase__ ) else: raise ValueError(F"""Unknown task name: {task}""" ) if vocab_path: UpperCAmelCase__ : Tuple = SpeechTaTokenizer(lowerCAmelCase__ , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it UpperCAmelCase__ : Dict = AddedToken('''<mask>''' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) UpperCAmelCase__ : int = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) UpperCAmelCase__ : Optional[Any] = SpeechTaFeatureExtractor() UpperCAmelCase__ : Any = SpeechTaProcessor(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ ) processor.save_pretrained(lowerCAmelCase__ ) UpperCAmelCase__ : List[str] = torch.load(lowerCAmelCase__ ) recursively_load_weights(fairseq_checkpoint['''model'''] , lowerCAmelCase__ , lowerCAmelCase__ ) model.save_pretrained(lowerCAmelCase__ ) if repo_id: print('''Pushing to the hub...''' ) processor.push_to_hub(lowerCAmelCase__ ) model.push_to_hub(lowerCAmelCase__ ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument( '''--task''', default='''s2t''', type=str, help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) UpperCamelCase__ = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
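The ignore lists in the conversion script above mix literal keys, `prefix.*` patterns, and `prefix.*.suffix` patterns. A minimal standalone sketch of that matching logic, mirroring the `should_ignore` helper with conventional names:

def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False

assert should_ignore("encoder.layers.3.norm_k.weight", ["encoder.layers.*.norm_k.weight"])
assert not should_ignore("decoder.version.x", ["encoder.proj"])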
299
1
'''simple docstring'''

import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpta import GPTaTokenizer


class lowerCamelCase_ ( tf.keras.layers.Layer ):
    def __init__( self : Any , _A : Dict[str, int] , _A : List[str] , _A : int = None , _A : int = None ):
        '''simple docstring'''
        super().__init__()
        UpperCAmelCase__ : str = pad_token_id
        UpperCAmelCase__ : int = max_length
        UpperCAmelCase__ : Optional[Any] = vocab
        UpperCAmelCase__ : Union[str, Any] = merges
        UpperCAmelCase__ : Union[str, Any] = BytePairTokenizer(_A , _A , sequence_length=_A )

    @classmethod
    def lowercase_ ( cls : Any , _A : GPTaTokenizer , *_A : Tuple , **_A : Tuple ):
        '''simple docstring'''
        UpperCAmelCase__ : Dict = [''' '''.join(_A ) for m in tokenizer.bpe_ranks.keys()]
        UpperCAmelCase__ : int = tokenizer.get_vocab()
        return cls(_A , _A , *_A , **_A )

    @classmethod
    def lowercase_ ( cls : int , _A : Union[str, os.PathLike] , *_A : Any , **_A : str ):
        '''simple docstring'''
        UpperCAmelCase__ : List[Any] = GPTaTokenizer.from_pretrained(_A , *_A , **_A )
        return cls.from_tokenizer(_A , *_A , **_A )

    @classmethod
    def lowercase_ ( cls : List[Any] , _A : Tuple ):
        '''simple docstring'''
        return cls(**_A )

    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def lowercase_ ( self : Dict , _A : int , _A : int = None ):
        '''simple docstring'''
        UpperCAmelCase__ : int = self.tf_tokenizer(_A )
        UpperCAmelCase__ : Any = tf.ones_like(_A )

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            UpperCAmelCase__ : int = max_length if max_length is not None else self.max_length

            if max_length is not None:
                UpperCAmelCase__ , UpperCAmelCase__ : int = pad_model_inputs(
                    _A , max_seq_length=_A , pad_value=self.pad_token_id )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
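# A hedged usage sketch of the in-graph tokenizer layer above. It assumes
# `tensorflow`, `keras_nlp` and `tensorflow_text` are installed and that the
# layer is exposed as TFGPT2Tokenizer (class names are obfuscated in this dump);
# treat the exact kwargs as an assumption, not a guaranteed API.
import tensorflow as tf
from transformers import TFGPT2Tokenizer

tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2", pad_token_id=50256, max_length=16)
batch = tf_tokenizer(tf.constant(["hello there", "general kenobi"]))
print(batch["input_ids"].shape, batch["attention_mask"].shape)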
299
'''simple docstring'''

import argparse

import fairseq
import torch

from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging


logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)

UpperCamelCase__ = {
    '''post_extract_proj''': '''feature_projection.projection''',
    '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
    '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
    '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
    '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
    '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
    '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
    '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
    '''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
    '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''encoder.layer_norm''',
    '''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
    '''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
    '''quantizer.weight_proj''': '''quantizer.weight_proj''',
    '''quantizer.vars''': '''quantizer.codevectors''',
    '''project_q''': '''project_q''',
    '''final_proj''': '''project_hid''',
    '''w2v_encoder.proj''': '''lm_head''',
    '''label_embs_concat''': '''label_embeddings_concat''',
    '''mask_emb''': '''masked_spec_embed''',
    '''spk_proj''': '''speaker_proj''',
}
UpperCamelCase__ = [
    '''lm_head''',
    '''quantizer.weight_proj''',
    '''quantizer.codevectors''',
    '''project_q''',
    '''project_hid''',
    '''label_embeddings_concat''',
    '''speaker_proj''',
    '''layer_norm_for_extract''',
]


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
    for attribute in key.split('''.''' ):
        UpperCAmelCase__ : Optional[int] = getattr(lowerCAmelCase__ , lowerCAmelCase__ )

    if weight_type is not None:
        UpperCAmelCase__ : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
    else:
        UpperCAmelCase__ : Union[str, Any] = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            F""" {value.shape} for {full_name}""" )

    if weight_type == "weight":
        UpperCAmelCase__ : int = value
    elif weight_type == "weight_g":
        UpperCAmelCase__ : Dict = value
    elif weight_type == "weight_v":
        UpperCAmelCase__ : List[str] = value
    elif weight_type == "bias":
        UpperCAmelCase__ : Tuple = value
    else:
        UpperCAmelCase__ : Tuple = value

    logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
    UpperCAmelCase__ : Optional[int] = []
    UpperCAmelCase__ : Dict = fairseq_model.state_dict()

    UpperCAmelCase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        UpperCAmelCase__ : Any = False
        if "conv_layers" in name:
            load_conv_layer(
                lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , )
            UpperCAmelCase__ : str = True
        else:
            for key, mapped_key in MAPPING.items():
                UpperCAmelCase__ : List[str] = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    UpperCAmelCase__ : Optional[int] = True
                    if "*" in mapped_key:
                        UpperCAmelCase__ : str = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2]
                        UpperCAmelCase__ : Optional[int] = mapped_key.replace('''*''' , lowerCAmelCase__ )
                    if "weight_g" in name:
                        UpperCAmelCase__ : List[str] = '''weight_g'''
                    elif "weight_v" in name:
                        UpperCAmelCase__ : Dict = '''weight_v'''
                    elif "bias" in name:
                        UpperCAmelCase__ : Optional[int] = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        UpperCAmelCase__ : Tuple = '''weight'''
                    else:
                        UpperCAmelCase__ : Optional[Any] = None
                    set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
                continue
        if not is_used:
            unused_weights.append(lowerCAmelCase__ )

    logger.warning(F"""Unused weights: {unused_weights}""" )


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
    UpperCAmelCase__ : Tuple = full_name.split('''conv_layers.''' )[-1]
    UpperCAmelCase__ : Optional[Any] = name.split('''.''' )
    UpperCAmelCase__ : Union[str, Any] = int(items[0] )
    UpperCAmelCase__ : Tuple = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            UpperCAmelCase__ : str = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            UpperCAmelCase__ : Optional[int] = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            UpperCAmelCase__ : List[str] = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            UpperCAmelCase__ : Optional[Any] = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(lowerCAmelCase__ )


@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True ) -> Any:
    if config_path is not None:
        UpperCAmelCase__ : Any = UniSpeechSatConfig.from_pretrained(lowerCAmelCase__ )
    else:
        UpperCAmelCase__ : int = UniSpeechSatConfig()

    UpperCAmelCase__ : Tuple = ''''''

    if is_finetuned:
        UpperCAmelCase__ : Optional[int] = UniSpeechSatForCTC(lowerCAmelCase__ )
    else:
        UpperCAmelCase__ : List[Any] = UniSpeechSatForPreTraining(lowerCAmelCase__ )

    UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    UpperCAmelCase__ : Union[str, Any] = model[0].eval()

    recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ )

    hf_wavavec.save_pretrained(lowerCAmelCase__ )


if __name__ == "__main__":
    UpperCamelCase__ = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
    )
    UpperCamelCase__ = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
299
1
'''simple docstring'''

from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class lowerCamelCase_ ( __a ):
    lowerCAmelCase__ = ''
    lowerCAmelCase__ = 'hf-legacy'  # "hf://"" is reserved for hffs

    def __init__( self : int , _A : Optional[DatasetInfo] = None , _A : Optional[str] = None , **_A : Optional[Any] , ):
        '''simple docstring'''
        super().__init__(self , **_A )
        UpperCAmelCase__ : Any = repo_info
        UpperCAmelCase__ : Optional[Any] = token
        UpperCAmelCase__ : Optional[Any] = None

    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        if self.dir_cache is None:
            UpperCAmelCase__ : Dict = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                UpperCAmelCase__ : str = {
                    '''name''': hf_file.rfilename,
                    '''size''': None,
                    '''type''': '''file''',
                }
                self.dir_cache.update(
                    {
                        str(_A ): {'''name''': str(_A ), '''size''': None, '''type''': '''directory'''}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )

    def lowercase_ ( self : Any , _A : str , _A : str = "rb" , **_A : Optional[Any] , ):
        '''simple docstring'''
        if not isinstance(self.repo_info , _A ):
            raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
        UpperCAmelCase__ : Any = hf_hub_url(self.repo_info.id , _A , revision=self.repo_info.sha )
        return fsspec.open(
            _A , mode=_A , headers=get_authentication_headers_for_url(_A , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()

    def lowercase_ ( self : int , _A : str , **_A : int ):
        '''simple docstring'''
        self._get_dirs()
        UpperCAmelCase__ : Tuple = self._strip_protocol(_A )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(_A )

    def lowercase_ ( self : Union[str, Any] , _A : List[Any] , _A : int=False , **_A : Optional[int] ):
        '''simple docstring'''
        self._get_dirs()
        UpperCAmelCase__ : Any = PurePosixPath(path.strip('''/''' ) )
        UpperCAmelCase__ : List[str] = {}
        for p, f in self.dir_cache.items():
            UpperCAmelCase__ : Any = PurePosixPath(p.strip('''/''' ) )
            UpperCAmelCase__ : Optional[int] = p.parent
            if root == path:
                UpperCAmelCase__ : Tuple = f
        UpperCAmelCase__ : Any = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f['''name'''] for f in out )
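# Self-contained sketch of the directory-cache trick above: every parent of a
# repo file (except the trailing "." root) becomes a "directory" entry. The
# file path below is hypothetical.
from pathlib import PurePosixPath

demo_parents = list(PurePosixPath("data/train/shard-000.parquet").parents)[:-1]
print([str(d) for d in demo_parents])  # ['data/train', 'data']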
299
'''simple docstring'''

import itertools
import random
import unittest

import numpy as np

from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


UpperCamelCase__ = random.Random()

if is_torch_available():
    import torch


def a__ ( lowerCAmelCase__ , lowerCAmelCase__=1.0 , lowerCAmelCase__=None , lowerCAmelCase__=None ) -> Optional[Any]:
    if rng is None:
        UpperCAmelCase__ : List[str] = global_rng

    UpperCAmelCase__ : Optional[Any] = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values


class lowerCamelCase_ ( unittest.TestCase ):
    def __init__( self : Any , _A : List[str] , _A : int=7 , _A : Dict=400 , _A : Tuple=2_000 , _A : Optional[int]=1 , _A : List[Any]=0.0 , _A : Any=16_000 , _A : int=True , _A : str=True , ):
        '''simple docstring'''
        UpperCAmelCase__ : Optional[Any] = parent
        UpperCAmelCase__ : str = batch_size
        UpperCAmelCase__ : Dict = min_seq_length
        UpperCAmelCase__ : str = max_seq_length
        UpperCAmelCase__ : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        UpperCAmelCase__ : Optional[Any] = feature_size
        UpperCAmelCase__ : int = padding_value
        UpperCAmelCase__ : int = sampling_rate
        UpperCAmelCase__ : Tuple = return_attention_mask
        UpperCAmelCase__ : str = do_normalize

    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def lowercase_ ( self : int , _A : Optional[Any]=False , _A : Any=False ):
        '''simple docstring'''

        def _flatten(_A : Union[str, Any] ):
            return list(itertools.chain(*_A ) )

        if equal_length:
            UpperCAmelCase__ : Tuple = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            UpperCAmelCase__ : Optional[int] = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]

        if numpify:
            UpperCAmelCase__ : Dict = [np.asarray(_A ) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class lowerCamelCase_ ( __a , unittest.TestCase ):
    lowerCAmelCase__ = ASTFeatureExtractor

    def lowercase_ ( self : int ):
        '''simple docstring'''
        UpperCAmelCase__ : int = ASTFeatureExtractionTester(self )

    def lowercase_ ( self : Any ):
        '''simple docstring'''
        UpperCAmelCase__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        UpperCAmelCase__ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
        UpperCAmelCase__ : List[Any] = [np.asarray(_A ) for speech_input in speech_inputs]

        # Test not batched input
        UpperCAmelCase__ : str = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        UpperCAmelCase__ : List[Any] = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )

        # Test batched
        UpperCAmelCase__ : Optional[Any] = feat_extract(_A , padding=_A , return_tensors='''np''' ).input_values
        UpperCAmelCase__ : Optional[int] = feat_extract(_A , padding=_A , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(_A , _A ):
            self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )

        # Test 2-D numpy arrays are batched.
        UpperCAmelCase__ : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        UpperCAmelCase__ : Any = np.asarray(_A )
        UpperCAmelCase__ : int = feat_extract(_A , return_tensors='''np''' ).input_values
        UpperCAmelCase__ : List[str] = feat_extract(_A , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(_A , _A ):
            self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) )

    @require_torch
    def lowercase_ ( self : List[str] ):
        '''simple docstring'''
        import torch

        UpperCAmelCase__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase__ : Any = np.random.rand(100 ).astype(np.floataa )
        UpperCAmelCase__ : int = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            UpperCAmelCase__ : str = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            UpperCAmelCase__ : Any = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    def lowercase_ ( self : int , _A : List[Any] ):
        '''simple docstring'''
        from datasets import load_dataset

        UpperCAmelCase__ : Tuple = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        UpperCAmelCase__ : List[Any] = ds.sort('''id''' ).select(range(_A ) )[:num_samples]['''audio''']

        return [x["array"] for x in speech_samples]

    @require_torch
    def lowercase_ ( self : Union[str, Any] ):
        '''simple docstring'''
        # fmt: off
        UpperCAmelCase__ : Any = torch.tensor(
            [-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
             -1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
             -1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
             -0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
        # fmt: on

        UpperCAmelCase__ : Optional[Any] = self._load_datasamples(1 )
        UpperCAmelCase__ : Optional[int] = ASTFeatureExtractor()
        UpperCAmelCase__ : Dict = feature_extractor(_A , return_tensors='''pt''' ).input_values
        self.assertEqual(input_values.shape , (1, 1_024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , _A , atol=1e-4 ) )
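# Self-contained numpy sketch of the ragged-batch padding idea exercised by
# the tests above (no transformers required); lengths mirror the 800/1000/1200
# inputs used in the test.
import numpy as np

waves = [np.random.rand(n).astype(np.float32) for n in (800, 1_000, 1_200)]
max_len = max(len(w) for w in waves)
batch = np.stack([np.pad(w, (0, max_len - len(w))) for w in waves])
mask = np.stack([np.pad(np.ones(len(w)), (0, max_len - len(w))) for w in waves])
print(batch.shape, mask.sum(axis=1))  # (3, 1200) [ 800. 1000. 1200.]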
299
1
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


UpperCamelCase__ = {
    '''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
    '''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
    '''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ = ['''LayoutLMv2TokenizerFast''']

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ = ['''LayoutLMv2FeatureExtractor''']
    UpperCamelCase__ = ['''LayoutLMv2ImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ = [
        '''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''LayoutLMv2ForQuestionAnswering''',
        '''LayoutLMv2ForSequenceClassification''',
        '''LayoutLMv2ForTokenClassification''',
        '''LayoutLMv2Layer''',
        '''LayoutLMv2Model''',
        '''LayoutLMv2PreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
    from .processing_layoutlmva import LayoutLMvaProcessor
    from .tokenization_layoutlmva import LayoutLMvaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmva import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMvaForQuestionAnswering,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaLayer,
            LayoutLMvaModel,
            LayoutLMvaPreTrainedModel,
        )

else:
    import sys

    UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
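# A minimal sketch of the lazy-import pattern that _LazyModule implements,
# reduced to module-level __getattr__ (PEP 562); the json example below is only
# an illustration of deferring an import until first attribute access.
import importlib

_demo_import_structure = {"json": ["dumps"]}  # submodule -> exported names


def __getattr__(name):
    for module_name, exported in _demo_import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)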
299
'''simple docstring''' import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = 0 @slow def lowercase_ ( self : Dict ): '''simple docstring''' for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(_A ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(_A ) , 0 ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 20 ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Tuple = AutoConfig.from_pretrained(_A ) self.assertIsInstance(_A , _A ) # Check that tokenizer_type ≠ model_type UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(_A , config=_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def lowercase_ ( self : str ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_A , '''vocab.txt''' ) ) UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(_A , tokenizer_type='''bert''' , use_fast=_A ) self.assertIsInstance(_A , _A ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_A , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_A , '''merges.txt''' ) ) UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A , tokenizer_type='''gpt2''' , use_fast=_A ) self.assertIsInstance(_A , _A ) @require_tokenizers def lowercase_ ( self : str ): '''simple docstring''' 
with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_A , '''vocab.txt''' ) ) UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(_A , tokenizer_type='''bert''' ) self.assertIsInstance(_A , _A ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_A , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_A , '''merges.txt''' ) ) UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(_A , tokenizer_type='''gpt2''' ) self.assertIsInstance(_A , _A ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' with pytest.raises(_A ): AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' ) @require_tokenizers def lowercase_ ( self : int ): '''simple docstring''' for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: UpperCAmelCase__ : Optional[int] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) if isinstance(_A , _A ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _A ) else: self.assertEqual(tokenizer.do_lower_case , _A ) self.assertEqual(tokenizer.model_max_length , 512 ) @require_tokenizers def lowercase_ ( self : List[str] ): '''simple docstring''' for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( _A , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ): UpperCAmelCase__ : Dict = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TOKENIZER_MAPPING.values() UpperCAmelCase__ : Any = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(_A ) @require_tokenizers def lowercase_ ( self : Optional[int] ): '''simple docstring''' self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=_A ) , _A ) self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , _A ) @require_tokenizers def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : int = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=_A ) UpperCAmelCase__ : Any = '''Hello, world. 
How are you?''' UpperCAmelCase__ : Dict = tokenizer.tokenize(_A ) self.assertEqual('''[UNK]''' , tokens[0] ) UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=_A ) UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize(_A ) self.assertEqual('''[UNK]''' , tokens[0] ) @require_tokenizers def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : List[str] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' ) self.assertEqual(type(_A ) , _A ) self.assertEqual(tokenizer.model_max_length , 512 ) self.assertEqual(tokenizer.vocab_size , 30_000 ) self.assertEqual(tokenizer.unk_token , '''[UNK]''' ) self.assertEqual(tokenizer.padding_side , '''right''' ) self.assertEqual(tokenizer.truncation_side , '''right''' ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 12 ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''ctrl''' ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(_A , _A ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : str = get_tokenizer_config('''bert-base-cased''' ) UpperCAmelCase__ : Optional[int] = config.pop('''_commit_hash''' , _A ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(_A , {'''do_lower_case''': False} ) # This model does not have a tokenizer_config so we get back an empty dict. UpperCAmelCase__ : Tuple = get_tokenizer_config(_A ) self.assertDictEqual(_A , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = get_tokenizer_config(_A ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' ) def lowercase_ ( self : Dict ): '''simple docstring''' try: AutoConfig.register('''custom''' , _A ) AutoTokenizer.register(_A , slow_tokenizer_class=_A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A ): AutoTokenizer.register(_A , slow_tokenizer_class=_A ) UpperCAmelCase__ : Optional[int] = CustomTokenizer.from_pretrained(_A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , _A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def lowercase_ ( self : Any ): '''simple docstring''' try: AutoConfig.register('''custom''' , _A ) # Can register in two steps AutoTokenizer.register(_A , slow_tokenizer_class=_A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(_A , fast_tokenizer_class=_A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( _A , slow_tokenizer_class=_A , fast_tokenizer_class=_A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A ): AutoTokenizer.register(_A , fast_tokenizer_class=_A ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase__ : Any = BertTokenizerFast.from_pretrained(_A ) bert_tokenizer.save_pretrained(_A ) UpperCAmelCase__ : Optional[int] = CustomTokenizerFast.from_pretrained(_A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(_A , use_fast=_A ) self.assertIsInstance(_A , _A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowercase_ ( self : Optional[int] ): '''simple docstring''' with self.assertRaises(_A ): UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(_A ): UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A , use_fast=_A ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) @require_tokenizers def lowercase_ ( self : int ): '''simple docstring''' class lowerCamelCase_ ( __a ): lowerCAmelCase__ = False class lowerCamelCase_ ( __a ): lowerCAmelCase__ = NewTokenizer lowerCAmelCase__ = False try: AutoConfig.register('''custom''' , _A ) AutoTokenizer.register(_A , slow_tokenizer_class=_A ) AutoTokenizer.register(_A , fast_tokenizer_class=_A ) # If remote code is not set, the default is to use local UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) UpperCAmelCase__ : str = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertTrue(tokenizer.special_attribute_present ) UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version UpperCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A , use_fast=_A ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def lowercase_ ( self : Tuple ): '''simple docstring''' with self.assertRaisesRegex( _A , '''bert-base is not a local folder and is not a valid model identifier''' ): UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' ) def lowercase_ ( self : Dict ): '''simple docstring''' with self.assertRaisesRegex( _A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A , revision='''aaaaaa''' ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
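# A plain-Python sketch of the register/lookup flow the tests above exercise;
# DemoConfig and DemoTokenizer are stand-ins, not transformers classes.
TOKENIZER_REGISTRY = {}


def register_tokenizer(config_cls, tokenizer_cls):
    if config_cls in TOKENIZER_REGISTRY:
        raise ValueError(f"{config_cls.__name__} is already registered")
    TOKENIZER_REGISTRY[config_cls] = tokenizer_cls


class DemoConfig:
    pass


class DemoTokenizer:
    pass


register_tokenizer(DemoConfig, DemoTokenizer)
assert TOKENIZER_REGISTRY[DemoConfig] is DemoTokenizer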
299
1
'''simple docstring'''

from math import isclose, sqrt


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> tuple[float, float, float]:
    UpperCAmelCase__ : Optional[int] = point_y / 4 / point_x
    UpperCAmelCase__ : int = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    UpperCAmelCase__ : List[str] = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    UpperCAmelCase__ : Optional[int] = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    UpperCAmelCase__ : Optional[int] = outgoing_gradient**2 + 4
    UpperCAmelCase__ : List[Any] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    UpperCAmelCase__ : Tuple = (point_y - outgoing_gradient * point_x) ** 2 - 1_00

    UpperCAmelCase__ : Union[str, Any] = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    UpperCAmelCase__ : List[Any] = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    UpperCAmelCase__ : List[str] = x_minus if isclose(lowerCAmelCase__ , lowerCAmelCase__ ) else x_plus
    UpperCAmelCase__ : Any = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def a__ ( lowerCAmelCase__ = 1.4 , lowerCAmelCase__ = -9.6 ) -> int:
    UpperCAmelCase__ : int = 0
    UpperCAmelCase__ : float = first_x_coord
    UpperCAmelCase__ : float = first_y_coord
    UpperCAmelCase__ : float = (1_0.1 - point_y) / (0.0 - point_x)

    while not (-0.0_1 <= point_x <= 0.0_1 and point_y > 0):
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = next_point(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(F"""{solution() = }""")
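# Quick numeric check of the geometry above (Project Euler 144): the starting
# point (1.4, -9.6) lies on the ellipse 4x^2 + y^2 = 100, and the mirror's
# normal gradient at (x, y) is y / (4 x).
x, y = 1.4, -9.6
assert abs(4 * x * x + y * y - 100) < 1e-9
print(y / 4 / x)  # normal gradient used by next_point: -1.714285...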
299
'''simple docstring'''


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> float:
    UpperCAmelCase__ : Tuple = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError('''All input parameters must be positive''' )

    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError('''Relative densities cannot be greater than one''' )
    else:
        UpperCAmelCase__ : List[str] = 1 - (matter_density + radiation_density + dark_energy)

        UpperCAmelCase__ : List[str] = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        UpperCAmelCase__ : Any = hubble_constant * e_a ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    UpperCamelCase__ = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
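# Sanity check of the Friedmann factor above: with a flat universe the
# densities sum to one at redshift 0, so E(0) = 1 and the function returns the
# Hubble constant unchanged.
rad, mat = 1e-4, 0.3
de = 1 - mat - rad
curvature = 1 - (mat + rad + de)  # 0.0 in the flat case
e_0 = rad + mat + curvature + de
print(68.3 * e_0 ** 0.5)  # 68.3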
299
1
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


UpperCamelCase__ = {
    '''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig''']
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ = ['''ConvNextFeatureExtractor''']
    UpperCamelCase__ = ['''ConvNextImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ = [
        '''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ConvNextForImageClassification''',
        '''ConvNextModel''',
        '''ConvNextPreTrainedModel''',
        '''ConvNextBackbone''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ = [
        '''TFConvNextForImageClassification''',
        '''TFConvNextModel''',
        '''TFConvNextPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
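# Minimal sketch of the TYPE_CHECKING split used above: the guarded import is
# resolved by static type checkers only and is never executed at runtime.
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    import numpy  # seen by mypy/pyright, not imported at runtime


def first_row(arr: "numpy.ndarray"):
    return arr[0]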
299
'''simple docstring''' import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin UpperCamelCase__ = logging.get_logger(__name__) enable_full_determinism() class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = UNetaDModel lowerCAmelCase__ = 'sample' @property def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 4 UpperCAmelCase__ : str = 3 UpperCAmelCase__ : str = (32, 32) UpperCAmelCase__ : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : Tuple = torch.tensor([10] ).to(_A ) return {"sample": noise, "timestep": time_step} @property def lowercase_ ( self : int ): '''simple docstring''' return (3, 32, 32) @property def lowercase_ ( self : Dict ): '''simple docstring''' return (3, 32, 32) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = { '''block_out_channels''': (32, 64), '''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''), '''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''), '''attention_head_dim''': 3, '''out_channels''': 3, '''in_channels''': 3, '''layers_per_block''': 2, '''sample_size''': 32, } UpperCAmelCase__ : Tuple = self.dummy_input return init_dict, inputs_dict class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = UNetaDModel lowerCAmelCase__ = 'sample' @property def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = 4 UpperCAmelCase__ : Dict = 4 UpperCAmelCase__ : List[str] = (32, 32) UpperCAmelCase__ : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : List[Any] = torch.tensor([10] ).to(_A ) return {"sample": noise, "timestep": time_step} @property def lowercase_ ( self : Tuple ): '''simple docstring''' return (4, 32, 32) @property def lowercase_ ( self : List[str] ): '''simple docstring''' return (4, 32, 32) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[str] = { '''sample_size''': 32, '''in_channels''': 4, '''out_channels''': 4, '''layers_per_block''': 2, '''block_out_channels''': (32, 64), '''attention_head_dim''': 32, '''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''), '''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''), } UpperCAmelCase__ : Optional[Any] = self.dummy_input return init_dict, inputs_dict def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(_A ) UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Any = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A ) model.to(_A ) UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' ) def lowercase_ ( 
self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A ) model_accelerate.to(_A ) model_accelerate.eval() UpperCAmelCase__ : Tuple = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) UpperCAmelCase__ : Union[str, Any] = noise.to(_A ) UpperCAmelCase__ : Optional[Any] = torch.tensor([10] * noise.shape[0] ).to(_A ) UpperCAmelCase__ : Any = model_accelerate(_A , _A )['''sample'''] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() UpperCAmelCase__ , UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained( '''fusing/unet-ldm-dummy-update''' , output_loading_info=_A , low_cpu_mem_usage=_A ) model_normal_load.to(_A ) model_normal_load.eval() UpperCAmelCase__ : Optional[int] = model_normal_load(_A , _A )['''sample'''] assert torch_all_close(_A , _A , rtol=1e-3 ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' ) model.eval() model.to(_A ) UpperCAmelCase__ : Union[str, Any] = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) UpperCAmelCase__ : str = noise.to(_A ) UpperCAmelCase__ : str = torch.tensor([10] * noise.shape[0] ).to(_A ) with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample UpperCAmelCase__ : List[Any] = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off UpperCAmelCase__ : Tuple = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] ) # fmt: on self.assertTrue(torch_all_close(_A , _A , rtol=1e-3 ) ) class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = UNetaDModel lowerCAmelCase__ = 'sample' @property def lowercase_ ( self : Any , _A : str=(32, 32) ): '''simple docstring''' UpperCAmelCase__ : Tuple = 4 UpperCAmelCase__ : List[str] = 3 UpperCAmelCase__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : Dict = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_A ) return {"sample": noise, "timestep": time_step} @property def lowercase_ ( self : List[str] ): '''simple docstring''' return (3, 32, 32) @property def lowercase_ ( self : List[Any] ): '''simple docstring''' return (3, 32, 32) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[str] = { '''block_out_channels''': [32, 64, 64, 64], '''in_channels''': 3, '''layers_per_block''': 1, '''out_channels''': 3, '''time_embedding_type''': '''fourier''', '''norm_eps''': 1e-6, '''mid_block_scale_factor''': math.sqrt(2.0 ), '''norm_num_groups''': None, '''down_block_types''': [ '''SkipDownBlock2D''', '''AttnSkipDownBlock2D''', '''SkipDownBlock2D''', '''SkipDownBlock2D''', ], '''up_block_types''': [ '''SkipUpBlock2D''', '''SkipUpBlock2D''', '''AttnSkipUpBlock2D''', '''SkipUpBlock2D''', ], } UpperCAmelCase__ : Tuple = self.dummy_input return init_dict, inputs_dict @slow def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : str = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_A ) self.assertIsNotNone(_A ) 
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(_A ) UpperCAmelCase__ : List[str] = self.dummy_input UpperCAmelCase__ : Dict = floats_tensor((4, 3) + (256, 256) ).to(_A ) UpperCAmelCase__ : Optional[Any] = noise UpperCAmelCase__ : Any = model(**_A ) assert image is not None, "Make sure output is not None" @slow def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' ) model.to(_A ) UpperCAmelCase__ : Optional[Any] = 4 UpperCAmelCase__ : List[str] = 3 UpperCAmelCase__ : Dict = (256, 256) UpperCAmelCase__ : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : Union[str, Any] = torch.tensor(batch_size * [1e-4] ).to(_A ) with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample UpperCAmelCase__ : Any = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off UpperCAmelCase__ : Tuple = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] ) # fmt: on self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' ) model.to(_A ) UpperCAmelCase__ : str = 4 UpperCAmelCase__ : Any = 3 UpperCAmelCase__ : int = (32, 32) UpperCAmelCase__ : Optional[Any] = torch.ones((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : Optional[Any] = torch.tensor(batch_size * [1e-4] ).to(_A ) with torch.no_grad(): UpperCAmelCase__ : int = model(_A , _A ).sample UpperCAmelCase__ : Dict = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off UpperCAmelCase__ : Any = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] ) # fmt: on self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) ) def lowercase_ ( self : Tuple ): '''simple docstring''' pass
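# Hedged sketch of the forward pass the tests above exercise, assuming the
# `diffusers` package is installed (UNet2DModel is the public name of the
# class under test); the config mirrors the dummy init dict.
import torch
from diffusers import UNet2DModel

demo_model = UNet2DModel(
    sample_size=32, in_channels=3, out_channels=3, layers_per_block=2,
    block_out_channels=(32, 64), attention_head_dim=3,
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
out = demo_model(torch.randn(1, 3, 32, 32), timestep=10).sample
print(out.shape)  # torch.Size([1, 3, 32, 32])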
299
1
'''simple docstring'''

import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
    def lowercase_ ( self : Union[str, Any] ):
        '''simple docstring'''
        UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        UpperCAmelCase__ : int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_A )
        UpperCAmelCase__ : Optional[int] = -1
        UpperCAmelCase__ : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_A )
        UpperCAmelCase__ : Optional[int] = model.generate(_A , max_new_tokens=10 , do_sample=_A )
        UpperCAmelCase__ : Dict = tokenizer.decode(greedy_ids[0] )

        with CaptureStdout() as cs:
            UpperCAmelCase__ : Tuple = TextStreamer(_A )
            model.generate(_A , max_new_tokens=10 , do_sample=_A , streamer=_A )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        UpperCAmelCase__ : Tuple = cs.out[:-1]
        self.assertEqual(_A , _A )

    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        UpperCAmelCase__ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_A )
        UpperCAmelCase__ : Optional[Any] = -1
        UpperCAmelCase__ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_A )
        UpperCAmelCase__ : Optional[int] = model.generate(_A , max_new_tokens=10 , do_sample=_A )
        UpperCAmelCase__ : Optional[int] = tokenizer.decode(greedy_ids[0] )

        UpperCAmelCase__ : Optional[int] = TextIteratorStreamer(_A )
        UpperCAmelCase__ : Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
        UpperCAmelCase__ : str = Thread(target=model.generate , kwargs=_A )
        thread.start()
        UpperCAmelCase__ : Optional[Any] = ''''''
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(_A , _A )

    def lowercase_ ( self : List[Any] ):
        '''simple docstring'''
        UpperCAmelCase__ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        UpperCAmelCase__ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_A )
        UpperCAmelCase__ : List[str] = -1
        UpperCAmelCase__ : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_A )
        UpperCAmelCase__ : Optional[Any] = model.generate(_A , max_new_tokens=10 , do_sample=_A )
        UpperCAmelCase__ : Union[str, Any] = greedy_ids[:, input_ids.shape[1] :]
        UpperCAmelCase__ : str = tokenizer.decode(new_greedy_ids[0] )

        with CaptureStdout() as cs:
            UpperCAmelCase__ : str = TextStreamer(_A , skip_prompt=_A )
            model.generate(_A , max_new_tokens=10 , do_sample=_A , streamer=_A )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        UpperCAmelCase__ : str = cs.out[:-1]
        self.assertEqual(_A , _A )

    def lowercase_ ( self : Any ):
        '''simple docstring'''
        UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained('''distilgpt2''' )
        UpperCAmelCase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_A )
        UpperCAmelCase__ : Optional[int] = -1
        UpperCAmelCase__ : Optional[int] = torch.ones((1, 5) , device=_A ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            UpperCAmelCase__ : Union[str, Any] = TextStreamer(_A , skip_special_tokens=_A )
            model.generate(_A , max_new_tokens=1 , do_sample=_A , streamer=_A )

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        UpperCAmelCase__ : Optional[Any] = cs.out[:-1]  # Remove the final "\n"
        UpperCAmelCase__ : str = tokenizer(_A , return_tensors='''pt''' )
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )

    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        UpperCAmelCase__ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        UpperCAmelCase__ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_A )
        UpperCAmelCase__ : Dict = -1
        UpperCAmelCase__ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_A )
        UpperCAmelCase__ : str = TextIteratorStreamer(_A , timeout=0.0_0_1 )
        UpperCAmelCase__ : Any = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
        UpperCAmelCase__ : List[str] = Thread(target=model.generate , kwargs=_A )
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(_A ):
            UpperCAmelCase__ : List[Any] = ''''''
            for new_text in streamer:
                streamer_text += new_text
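# Hedged sketch of the producer/consumer pattern tested above, assuming
# `transformers` and `torch` are installed (the tiny test model is fetched
# from the Hub on first use).
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
lm = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
streamer = TextIteratorStreamer(tok)
inputs = tok("hello", return_tensors="pt")
Thread(target=lm.generate, kwargs={**inputs, "max_new_tokens": 5, "streamer": streamer}).start()
print("".join(streamer))  # text chunks arrive while generation runs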
299
'''simple docstring'''
from __future__ import annotations


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> tuple[float, list[float]]:
    UpperCAmelCase__ : Optional[Any] = list(range(len(lowerCAmelCase__ ) ) )
    UpperCAmelCase__ : Optional[Any] = [v / w for v, w in zip(lowerCAmelCase__ , lowerCAmelCase__ )]
    # sort item indices by value/weight ratio, best ratio first
    index.sort(key=lambda lowerCAmelCase__ : ratio[lowerCAmelCase__] , reverse=True )
    UpperCAmelCase__ : float = 0
    UpperCAmelCase__ : list[float] = [0] * len(lowerCAmelCase__ )
    for i in index:
        if weight[i] <= capacity:
            # the whole item fits: take all of it
            UpperCAmelCase__ : List[str] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # take only the fraction of the item that still fits, then stop
            UpperCAmelCase__ : Tuple = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
299
1
'''simple docstring'''
UpperCamelCase__ = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

UpperCamelCase__ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
UpperCamelCase__ = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
299
'''simple docstring'''
from ..utils import DummyObject, requires_backends


class lowerCamelCase_ ( metaclass=__a ):
    lowerCAmelCase__ = ['torch', 'transformers', 'onnx']

    def __init__( self : int , *_A : Tuple , **_A : Union[str, Any] ):
        '''simple docstring'''
        requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def lowercase_ ( cls : Union[str, Any] , *_A : List[Any] , **_A : Any ):
        '''simple docstring'''
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def lowercase_ ( cls : int , *_A : Union[str, Any] , **_A : Optional[Any] ):
        '''simple docstring'''
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )


class lowerCamelCase_ ( metaclass=__a ):
    lowerCAmelCase__ = ['torch', 'transformers', 'onnx']

    def __init__( self : Any , *_A : List[str] , **_A : Tuple ):
        '''simple docstring'''
        requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def lowercase_ ( cls : Tuple , *_A : Tuple , **_A : Union[str, Any] ):
        '''simple docstring'''
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def lowercase_ ( cls : List[Any] , *_A : List[str] , **_A : Dict ):
        '''simple docstring'''
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )


class lowerCamelCase_ ( metaclass=__a ):
    lowerCAmelCase__ = ['torch', 'transformers', 'onnx']

    def __init__( self : Dict , *_A : Any , **_A : int ):
        '''simple docstring'''
        requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def lowercase_ ( cls : List[Any] , *_A : List[Any] , **_A : Optional[int] ):
        '''simple docstring'''
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def lowercase_ ( cls : int , *_A : Dict , **_A : Optional[Any] ):
        '''simple docstring'''
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )


class lowerCamelCase_ ( metaclass=__a ):
    lowerCAmelCase__ = ['torch', 'transformers', 'onnx']

    def __init__( self : List[Any] , *_A : Optional[int] , **_A : Optional[Any] ):
        '''simple docstring'''
        requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def lowercase_ ( cls : Dict , *_A : Any , **_A : Tuple ):
        '''simple docstring'''
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def lowercase_ ( cls : int , *_A : Union[str, Any] , **_A : Dict ):
        '''simple docstring'''
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )


class lowerCamelCase_ ( metaclass=__a ):
    lowerCAmelCase__ = ['torch', 'transformers', 'onnx']

    def __init__( self : List[Any] , *_A : Optional[int] , **_A : Dict ):
        '''simple docstring'''
        requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def lowercase_ ( cls : Dict , *_A : str , **_A : Dict ):
        '''simple docstring'''
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def lowercase_ ( cls : Union[str, Any] , *_A : Optional[int] , **_A : int ):
        '''simple docstring'''
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )


class lowerCamelCase_ ( metaclass=__a ):
    lowerCAmelCase__ = ['torch', 'transformers', 'onnx']

    def __init__( self : Optional[Any] , *_A : Union[str, Any] , **_A : Dict ):
        '''simple docstring'''
        requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def lowercase_ ( cls : List[str] , *_A : str , **_A : List[str] ):
        '''simple docstring'''
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def lowercase_ ( cls : Dict , *_A : str , **_A : Any ):
        '''simple docstring'''
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
299
1
'''simple docstring'''
import hashlib
import unittest

from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:

    class lowerCamelCase_ :

        @staticmethod
        def lowercase_ ( *_A : Any , **_A : Optional[int] ):
            '''simple docstring'''
            pass


def a__ ( lowerCAmelCase__ ) -> str:
    UpperCAmelCase__ : List[Any] = hashlib.md5(image.tobytes() )
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
    lowerCAmelCase__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def lowercase_ ( self : Dict , _A : Optional[int] , _A : Optional[int] , _A : List[Any] ):
        '''simple docstring'''
        UpperCAmelCase__ : Union[str, Any] = DepthEstimationPipeline(model=_A , image_processor=_A )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def lowercase_ ( self : Tuple , _A : int , _A : int ):
        '''simple docstring'''
        UpperCAmelCase__ : Tuple = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , _A )
        import datasets

        UpperCAmelCase__ : List[Any] = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
        UpperCAmelCase__ : str = depth_estimator(
            [
                Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                # RGBA
                dataset[0]['''file'''],
                # LA
                dataset[1]['''file'''],
                # L
                dataset[2]['''file'''],
            ]
        )
        self.assertEqual(
            [
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] , _A , )

    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''' )
    def lowercase_ ( self : Any ):
        '''simple docstring'''
        pass

    @slow
    @require_torch
    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        UpperCAmelCase__ : Optional[Any] = '''Intel/dpt-large'''
        UpperCAmelCase__ : List[Any] = pipeline('''depth-estimation''' , model=_A )
        UpperCAmelCase__ : Dict = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        UpperCAmelCase__ : Optional[Any] = hashimage(outputs['''depth'''] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 2_9.3_0_4 )
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.6_6_2 )

    @require_torch
    def lowercase_ ( self : Union[str, Any] ):
        '''simple docstring'''
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
299
'''simple docstring'''
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


UpperCamelCase__ = {'''configuration_mmbt''': ['''MMBTConfig''']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
299
1
'''simple docstring''' from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( 'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ' , __a , ) class lowerCamelCase_ ( __a ): lowerCAmelCase__ = RobertaConfig lowerCAmelCase__ = 'roberta' def __init__( self : Optional[Any] , _A : List[str] ): '''simple docstring''' super().__init__(_A ) UpperCAmelCase__ : Any = RobertaEmbeddings(_A ) self.init_weights() @add_start_docstrings( 'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ' , __a , ) class lowerCamelCase_ ( __a ): lowerCAmelCase__ = RobertaConfig lowerCAmelCase__ = 'roberta' def __init__( self : Optional[Any] , _A : Optional[int] ): '''simple docstring''' super().__init__(_A ) UpperCAmelCase__ : Tuple = config.num_labels UpperCAmelCase__ : List[Any] = config.num_hidden_layers UpperCAmelCase__ : List[str] = DeeRobertaModel(_A ) UpperCAmelCase__ : Optional[int] = nn.Dropout(config.hidden_dropout_prob ) UpperCAmelCase__ : Union[str, Any] = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(_A ) def lowercase_ ( self : List[str] , _A : Tuple=None , _A : Dict=None , _A : str=None , _A : Union[str, Any]=None , _A : Dict=None , _A : Optional[Any]=None , _A : str=None , _A : Optional[Any]=-1 , _A : Dict=False , ): '''simple docstring''' UpperCAmelCase__ : Any = self.num_layers try: UpperCAmelCase__ : Optional[Any] = self.roberta( _A , attention_mask=_A , token_type_ids=_A , position_ids=_A , head_mask=_A , inputs_embeds=_A , ) UpperCAmelCase__ : Optional[Any] = outputs[1] UpperCAmelCase__ : str = self.dropout(_A ) UpperCAmelCase__ : List[Any] = self.classifier(_A ) UpperCAmelCase__ : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: UpperCAmelCase__ : Optional[Any] = e.message UpperCAmelCase__ : List[str] = e.exit_layer UpperCAmelCase__ : int = outputs[0] if not self.training: UpperCAmelCase__ : List[str] = entropy(_A ) UpperCAmelCase__ : int = [] UpperCAmelCase__ : Optional[Any] = [] if labels is not None: if self.num_labels == 1: # We are doing regression UpperCAmelCase__ : int = MSELoss() UpperCAmelCase__ : Optional[Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: UpperCAmelCase__ : Optional[int] = CrossEntropyLoss() UpperCAmelCase__ : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits UpperCAmelCase__ : List[Any] = [] for highway_exit in outputs[-1]: UpperCAmelCase__ : Union[str, Any] = highway_exit[0] if not self.training: highway_logits_all.append(_A ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression UpperCAmelCase__ : Any = MSELoss() UpperCAmelCase__ : List[str] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: UpperCAmelCase__ : Any = CrossEntropyLoss() UpperCAmelCase__ : List[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(_A ) if train_highway: 
UpperCAmelCase__ : Optional[int] = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: UpperCAmelCase__ : str = (loss,) + outputs if not self.training: UpperCAmelCase__ : Union[str, Any] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: UpperCAmelCase__ : List[str] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
299
'''simple docstring''' import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class lowerCamelCase_ ( __a ): def __get__( self : str , _A : Tuple , _A : List[str]=None ): '''simple docstring''' if obj is None: return self if self.fget is None: raise AttributeError('''unreadable attribute''' ) UpperCAmelCase__ : Union[str, Any] = '''__cached_''' + self.fget.__name__ UpperCAmelCase__ : Any = getattr(_A , _A , _A ) if cached is None: UpperCAmelCase__ : Dict = self.fget(_A ) setattr(_A , _A , _A ) return cached def a__ ( lowerCAmelCase__ ) -> Optional[int]: UpperCAmelCase__ : Tuple = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(F"""invalid truth value {val!r}""" ) def a__ ( lowerCAmelCase__ ) -> Optional[Any]: if is_torch_fx_proxy(lowerCAmelCase__ ): return True if is_torch_available(): import torch if isinstance(lowerCAmelCase__ , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(lowerCAmelCase__ , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(lowerCAmelCase__ , (jnp.ndarray, Tracer) ): return True return isinstance(lowerCAmelCase__ , np.ndarray ) def a__ ( lowerCAmelCase__ ) -> Any: return isinstance(lowerCAmelCase__ , np.ndarray ) def a__ ( lowerCAmelCase__ ) -> int: return _is_numpy(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> Optional[Any]: import torch return isinstance(lowerCAmelCase__ , torch.Tensor ) def a__ ( lowerCAmelCase__ ) -> List[str]: return False if not is_torch_available() else _is_torch(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> Optional[Any]: import torch return isinstance(lowerCAmelCase__ , torch.device ) def a__ ( lowerCAmelCase__ ) -> List[str]: return False if not is_torch_available() else _is_torch_device(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> Any: import torch if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): if hasattr(lowerCAmelCase__ , lowerCAmelCase__ ): UpperCAmelCase__ : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) else: return False return isinstance(lowerCAmelCase__ , torch.dtype ) def a__ ( lowerCAmelCase__ ) -> Optional[int]: return False if not is_torch_available() else _is_torch_dtype(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> List[Any]: import tensorflow as tf return isinstance(lowerCAmelCase__ , tf.Tensor ) def a__ ( lowerCAmelCase__ ) -> List[str]: return False if not is_tf_available() else _is_tensorflow(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> Any: import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(lowerCAmelCase__ , '''is_symbolic_tensor''' ): return tf.is_symbolic_tensor(lowerCAmelCase__ ) return type(lowerCAmelCase__ ) == tf.Tensor def a__ ( lowerCAmelCase__ ) -> Union[str, Any]: return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> Tuple: import jax.numpy as jnp # noqa: F811 return isinstance(lowerCAmelCase__ , jnp.ndarray ) def a__ ( lowerCAmelCase__ ) -> List[Any]: return False if not 
is_flax_available() else _is_jax(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> Tuple: if isinstance(lowerCAmelCase__ , (dict, UserDict) ): return {k: to_py_obj(lowerCAmelCase__ ) for k, v in obj.items()} elif isinstance(lowerCAmelCase__ , (list, tuple) ): return [to_py_obj(lowerCAmelCase__ ) for o in obj] elif is_tf_tensor(lowerCAmelCase__ ): return obj.numpy().tolist() elif is_torch_tensor(lowerCAmelCase__ ): return obj.detach().cpu().tolist() elif is_jax_tensor(lowerCAmelCase__ ): return np.asarray(lowerCAmelCase__ ).tolist() elif isinstance(lowerCAmelCase__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def a__ ( lowerCAmelCase__ ) -> Tuple: if isinstance(lowerCAmelCase__ , (dict, UserDict) ): return {k: to_numpy(lowerCAmelCase__ ) for k, v in obj.items()} elif isinstance(lowerCAmelCase__ , (list, tuple) ): return np.array(lowerCAmelCase__ ) elif is_tf_tensor(lowerCAmelCase__ ): return obj.numpy() elif is_torch_tensor(lowerCAmelCase__ ): return obj.detach().cpu().numpy() elif is_jax_tensor(lowerCAmelCase__ ): return np.asarray(lowerCAmelCase__ ) else: return obj class lowerCamelCase_ ( __a ): def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[str] = fields(self ) # Safety and consistency checks if not len(_A ): raise ValueError(f"""{self.__class__.__name__} has no fields.""" ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" ) UpperCAmelCase__ : Dict = getattr(self , class_fields[0].name ) UpperCAmelCase__ : Any = all(getattr(self , field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(_A ): if isinstance(_A , _A ): UpperCAmelCase__ : List[Any] = first_field.items() UpperCAmelCase__ : Optional[int] = True else: try: UpperCAmelCase__ : Optional[int] = iter(_A ) UpperCAmelCase__ : Optional[int] = True except TypeError: UpperCAmelCase__ : Optional[Any] = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(_A ): if ( not isinstance(_A , (list, tuple) ) or not len(_A ) == 2 or not isinstance(element[0] , _A ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute UpperCAmelCase__ : List[Any] = first_field else: # If we have a mixed iterator, raise an error raise ValueError( f"""Cannot set key/value for {element}. 
It needs to be a tuple (key, value).""" ) break setattr(self , element[0] , element[1] ) if element[1] is not None: UpperCAmelCase__ : List[str] = element[1] elif first_field is not None: UpperCAmelCase__ : Optional[Any] = first_field else: for field in class_fields: UpperCAmelCase__ : Optional[int] = getattr(self , field.name ) if v is not None: UpperCAmelCase__ : str = v def __delitem__( self : Union[str, Any] , *_A : Any , **_A : str ): '''simple docstring''' raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" ) def lowercase_ ( self : Any , *_A : List[str] , **_A : Tuple ): '''simple docstring''' raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" ) def lowercase_ ( self : Optional[Any] , *_A : Any , **_A : Tuple ): '''simple docstring''' raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" ) def lowercase_ ( self : Optional[Any] , *_A : Dict , **_A : List[Any] ): '''simple docstring''' raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" ) def __getitem__( self : List[str] , _A : Any ): '''simple docstring''' if isinstance(_A , _A ): UpperCAmelCase__ : Union[str, Any] = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self : int , _A : Union[str, Any] , _A : str ): '''simple docstring''' if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(_A , _A ) super().__setattr__(_A , _A ) def __setitem__( self : Any , _A : Optional[int] , _A : List[str] ): '''simple docstring''' super().__setitem__(_A , _A ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(_A , _A ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' return tuple(self[k] for k in self.keys() ) class lowerCamelCase_ ( __a , __a ): @classmethod def lowercase_ ( cls : Optional[Any] , _A : Optional[Any] ): '''simple docstring''' raise ValueError( f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" ) class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'longest' lowerCAmelCase__ = 'max_length' lowerCAmelCase__ = 'do_not_pad' class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'pt' lowerCAmelCase__ = 'tf' lowerCAmelCase__ = 'np' lowerCAmelCase__ = 'jax' class lowerCamelCase_ : def __init__( self : List[Any] , _A : List[ContextManager] ): '''simple docstring''' UpperCAmelCase__ : str = context_managers UpperCAmelCase__ : int = ExitStack() def __enter__( self : str ): '''simple docstring''' for context_manager in self.context_managers: self.stack.enter_context(_A ) def __exit__( self : Dict , *_A : List[Any] , **_A : str ): '''simple docstring''' self.stack.__exit__(*_A , **_A ) def a__ ( lowerCAmelCase__ ) -> Any: UpperCAmelCase__ : int = infer_framework(lowerCAmelCase__ ) if framework == "tf": UpperCAmelCase__ : Optional[Any] = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCAmelCase__ : List[Any] = inspect.signature(model_class.forward ) # PyTorch models else: UpperCAmelCase__ : List[Any] = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def a__ ( lowerCAmelCase__ ) -> Optional[int]: UpperCAmelCase__ : Dict = model_class.__name__ UpperCAmelCase__ : Union[str, Any] = infer_framework(lowerCAmelCase__ ) if framework == 
"tf": UpperCAmelCase__ : Tuple = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCAmelCase__ : List[str] = inspect.signature(model_class.forward ) # PyTorch models else: UpperCAmelCase__ : int = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = "" , lowerCAmelCase__ = "." ) -> Any: def _flatten_dict(lowerCAmelCase__ , lowerCAmelCase__="" , lowerCAmelCase__="." ): for k, v in d.items(): UpperCAmelCase__ : int = str(lowerCAmelCase__ ) + delimiter + str(lowerCAmelCase__ ) if parent_key else k if v and isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): yield from flatten_dict(lowerCAmelCase__ , lowerCAmelCase__ , delimiter=lowerCAmelCase__ ).items() else: yield key, v return dict(_flatten_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) ) @contextmanager def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = False ) -> int: if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ) -> Optional[Any]: if is_numpy_array(lowerCAmelCase__ ): return np.transpose(lowerCAmelCase__ , axes=lowerCAmelCase__ ) elif is_torch_tensor(lowerCAmelCase__ ): return array.T if axes is None else array.permute(*lowerCAmelCase__ ) elif is_tf_tensor(lowerCAmelCase__ ): import tensorflow as tf return tf.transpose(lowerCAmelCase__ , perm=lowerCAmelCase__ ) elif is_jax_tensor(lowerCAmelCase__ ): return jnp.transpose(lowerCAmelCase__ , axes=lowerCAmelCase__ ) else: raise ValueError(F"""Type not supported for transpose: {type(lowerCAmelCase__ )}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple: if is_numpy_array(lowerCAmelCase__ ): return np.reshape(lowerCAmelCase__ , lowerCAmelCase__ ) elif is_torch_tensor(lowerCAmelCase__ ): return array.reshape(*lowerCAmelCase__ ) elif is_tf_tensor(lowerCAmelCase__ ): import tensorflow as tf return tf.reshape(lowerCAmelCase__ , lowerCAmelCase__ ) elif is_jax_tensor(lowerCAmelCase__ ): return jnp.reshape(lowerCAmelCase__ , lowerCAmelCase__ ) else: raise ValueError(F"""Type not supported for reshape: {type(lowerCAmelCase__ )}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ) -> List[Any]: if is_numpy_array(lowerCAmelCase__ ): return np.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ ) elif is_torch_tensor(lowerCAmelCase__ ): return array.squeeze() if axis is None else array.squeeze(dim=lowerCAmelCase__ ) elif is_tf_tensor(lowerCAmelCase__ ): import tensorflow as tf return tf.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ ) elif is_jax_tensor(lowerCAmelCase__ ): return jnp.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ ) else: raise ValueError(F"""Type not supported for squeeze: {type(lowerCAmelCase__ )}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]: if is_numpy_array(lowerCAmelCase__ ): return np.expand_dims(lowerCAmelCase__ , lowerCAmelCase__ ) elif is_torch_tensor(lowerCAmelCase__ ): return array.unsqueeze(dim=lowerCAmelCase__ ) elif is_tf_tensor(lowerCAmelCase__ ): import tensorflow as tf return tf.expand_dims(lowerCAmelCase__ , axis=lowerCAmelCase__ ) elif is_jax_tensor(lowerCAmelCase__ ): return jnp.expand_dims(lowerCAmelCase__ , axis=lowerCAmelCase__ ) else: raise ValueError(F"""Type not supported for expand_dims: {type(lowerCAmelCase__ )}.""" ) 
def a__ ( lowerCAmelCase__ ) -> int: if is_numpy_array(lowerCAmelCase__ ): return np.size(lowerCAmelCase__ ) elif is_torch_tensor(lowerCAmelCase__ ): return array.numel() elif is_tf_tensor(lowerCAmelCase__ ): import tensorflow as tf return tf.size(lowerCAmelCase__ ) elif is_jax_tensor(lowerCAmelCase__ ): return array.size else: raise ValueError(F"""Type not supported for expand_dims: {type(lowerCAmelCase__ )}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]: for key, value in auto_map.items(): if isinstance(lowerCAmelCase__ , (tuple, list) ): UpperCAmelCase__ : int = [F"""{repo_id}--{v}""" if (v is not None and '''--''' not in v) else v for v in value] elif value is not None and "--" not in value: UpperCAmelCase__ : str = F"""{repo_id}--{value}""" return auto_map def a__ ( lowerCAmelCase__ ) -> Tuple: for base_class in inspect.getmro(lowerCAmelCase__ ): UpperCAmelCase__ : Optional[int] = base_class.__module__ UpperCAmelCase__ : Optional[int] = base_class.__name__ if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel": return "tf" elif module.startswith('''torch''' ) or name == "PreTrainedModel": return "pt" elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(F"""Could not infer framework from class {model_class}.""" )
299
1
'''simple docstring'''
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


UpperCamelCase__ = logging.get_logger(__name__)


def a__ ( lowerCAmelCase__ ) -> Optional[int]:
    UpperCAmelCase__ : Optional[int] = R'''\w+[.]\d+'''
    UpperCAmelCase__ : Union[str, Any] = re.findall(lowerCAmelCase__ , lowerCAmelCase__ )
    for pat in pats:
        UpperCAmelCase__ : List[Any] = key.replace(lowerCAmelCase__ , '''_'''.join(pat.split('''.''' ) ) )
    return key


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
    UpperCAmelCase__ : List[Any] = pt_tuple_key[:-1] + ('''scale''',)
    if (
        any('''norm''' in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        UpperCAmelCase__ : int = pt_tuple_key[:-1] + ('''scale''',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        UpperCAmelCase__ : Dict = pt_tuple_key[:-1] + ('''scale''',)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        UpperCAmelCase__ : Union[str, Any] = pt_tuple_key[:-1] + ('''embedding''',)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    UpperCAmelCase__ : Union[str, Any] = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        UpperCAmelCase__ : str = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    UpperCAmelCase__ : int = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight":
        UpperCAmelCase__ : str = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    UpperCAmelCase__ : Tuple = pt_tuple_key[:-1] + ('''weight''',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    UpperCAmelCase__ : Any = pt_tuple_key[:-1] + ('''bias''',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=42 ) -> int:
    # Step 1: Convert pytorch tensor to numpy
    UpperCAmelCase__ : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    UpperCAmelCase__ : Optional[Any] = flax_model.init_weights(PRNGKey(lowerCAmelCase__ ) )

    UpperCAmelCase__ : Optional[Any] = flatten_dict(lowerCAmelCase__ )
    UpperCAmelCase__ : Optional[Any] = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        UpperCAmelCase__ : Dict = rename_key(lowerCAmelCase__ )
        UpperCAmelCase__ : Tuple = tuple(renamed_pt_key.split('''.''' ) )

        # Correctly rename weight parameters
        UpperCAmelCase__ , UpperCAmelCase__ : int = rename_key_and_reshape_tensor(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."""
                )

        # also add unexpected weight so that warning is thrown
        UpperCAmelCase__ : Optional[Any] = jnp.asarray(lowerCAmelCase__ )

    return unflatten_dict(lowerCAmelCase__ )
299
'''simple docstring''' import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCamelCase__ = 1_6 UpperCamelCase__ = 3_2 def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 16 ) -> Dict: UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''bert-base-cased''' ) UpperCAmelCase__ : str = DatasetDict( { '''train''': dataset['''train'''].select(lowerCAmelCase__ ), '''validation''': dataset['''train'''].select(lowerCAmelCase__ ), '''test''': dataset['''validation'''], } ) def tokenize_function(lowerCAmelCase__ ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase__ : Optional[int] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCAmelCase__ : Dict = datasets.map( lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase__ : int = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(lowerCAmelCase__ ): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCAmelCase__ : Optional[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCAmelCase__ : Any = 16 elif accelerator.mixed_precision != "no": UpperCAmelCase__ : Dict = 8 else: UpperCAmelCase__ : List[Any] = None return tokenizer.pad( lowerCAmelCase__ , padding='''longest''' , max_length=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_tensors='''pt''' , ) # Instantiate dataloaders. 
UpperCAmelCase__ : List[Any] = DataLoader( tokenized_datasets['''train'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ ) UpperCAmelCase__ : List[str] = DataLoader( tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ ) UpperCAmelCase__ : List[Any] = DataLoader( tokenized_datasets['''test'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ ) return train_dataloader, eval_dataloader, test_dataloader def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str: # New Code # UpperCAmelCase__ : List[str] = [] # Download the dataset UpperCAmelCase__ : Union[str, Any] = load_dataset('''glue''' , '''mrpc''' ) # Create our splits UpperCAmelCase__ : str = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator UpperCAmelCase__ : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase__ : Any = config['''lr'''] UpperCAmelCase__ : Any = int(config['''num_epochs'''] ) UpperCAmelCase__ : Any = int(config['''seed'''] ) UpperCAmelCase__ : Dict = int(config['''batch_size'''] ) UpperCAmelCase__ : Any = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation UpperCAmelCase__ : Optional[Any] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: UpperCAmelCase__ : Any = batch_size // MAX_GPU_BATCH_SIZE UpperCAmelCase__ : List[Any] = MAX_GPU_BATCH_SIZE set_seed(lowerCAmelCase__ ) # New Code # # Create our folds: UpperCAmelCase__ : Union[str, Any] = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] ) UpperCAmelCase__ : Dict = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(lowerCAmelCase__ ): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = get_fold_dataloaders( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase__ : List[str] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowerCAmelCase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCAmelCase__ : Optional[Any] = model.to(accelerator.device ) # Instantiate optimizer UpperCAmelCase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=lowerCAmelCase__ ) # Instantiate scheduler UpperCAmelCase__ : Any = get_linear_schedule_with_warmup( optimizer=lowerCAmelCase__ , num_warmup_steps=1_00 , num_training_steps=(len(lowerCAmelCase__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = accelerator.prepare( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Now we train the model for epoch in range(lowerCAmelCase__ ): model.train() for step, batch in enumerate(lowerCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) UpperCAmelCase__ : Union[str, Any] = model(**lowerCAmelCase__ ) UpperCAmelCase__ : Dict = outputs.loss UpperCAmelCase__ : Dict = loss / gradient_accumulation_steps accelerator.backward(lowerCAmelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase__ : str = model(**lowerCAmelCase__ ) UpperCAmelCase__ : Any = outputs.logits.argmax(dim=-1 ) UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , ) UpperCAmelCase__ : str = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , lowerCAmelCase__ ) # New Code # # We also run predictions on the test set at the very end UpperCAmelCase__ : int = [] for step, batch in enumerate(lowerCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase__ : str = model(**lowerCAmelCase__ ) UpperCAmelCase__ : Union[str, Any] = outputs.logits UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(lowerCAmelCase__ , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: UpperCAmelCase__ : Union[str, Any] = torch.cat(lowerCAmelCase__ , dim=0 ) UpperCAmelCase__ : Tuple = torch.stack(lowerCAmelCase__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) UpperCAmelCase__ : Optional[Any] = metric.compute(predictions=lowerCAmelCase__ , references=lowerCAmelCase__ ) accelerator.print('''Average test metrics from all folds:''' , lowerCAmelCase__ ) def a__ ( ) -> Any: UpperCAmelCase__ : Tuple = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). 
Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) # New Code # parser.add_argument('''--num_folds''' , type=lowerCAmelCase__ , default=3 , help='''The number of splits to perform across the dataset''' ) UpperCAmelCase__ : Tuple = parser.parse_args() UpperCAmelCase__ : Any = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(lowerCAmelCase__ , lowerCAmelCase__ ) if __name__ == "__main__": main()
299
1
'''simple docstring''' import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase_ : def __init__( self : List[str] , _A : str , _A : int=13 , _A : Tuple=7 , _A : Optional[Any]=True , _A : Optional[int]=True , _A : Any=True , _A : int=True , _A : Optional[Any]=99 , _A : List[Any]=16 , _A : Any=36 , _A : Any=6 , _A : Optional[int]=6 , _A : Union[str, Any]=6 , _A : Dict=37 , _A : List[Any]="gelu" , _A : List[str]=0.1 , _A : str=0.1 , _A : str=512 , _A : Dict=16 , _A : str=2 , _A : str=0.0_2 , _A : int=3 , _A : int=4 , _A : Dict=None , ): '''simple docstring''' UpperCAmelCase__ : str = parent UpperCAmelCase__ : int = batch_size UpperCAmelCase__ : List[Any] = seq_length UpperCAmelCase__ : Dict = is_training UpperCAmelCase__ : Any = use_input_mask UpperCAmelCase__ : Union[str, Any] = use_token_type_ids UpperCAmelCase__ : Optional[int] = use_labels UpperCAmelCase__ : str = vocab_size UpperCAmelCase__ : Tuple = embedding_size UpperCAmelCase__ : Any = hidden_size UpperCAmelCase__ : Union[str, Any] = num_hidden_layers UpperCAmelCase__ : int = num_hidden_groups UpperCAmelCase__ : str = num_attention_heads UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : List[str] = hidden_act UpperCAmelCase__ : Optional[Any] = hidden_dropout_prob UpperCAmelCase__ : Tuple = attention_probs_dropout_prob UpperCAmelCase__ : Tuple = max_position_embeddings UpperCAmelCase__ : List[str] = type_vocab_size UpperCAmelCase__ : List[str] = type_sequence_label_size UpperCAmelCase__ : Any = initializer_range UpperCAmelCase__ : Union[str, Any] = num_labels UpperCAmelCase__ : List[str] = num_choices UpperCAmelCase__ : str = scope def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ : int = None if self.use_input_mask: UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : Dict = None if self.use_token_type_ids: UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase__ : str = None UpperCAmelCase__ : str = None UpperCAmelCase__ : int = None if self.use_labels: UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ : List[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase_ ( self : List[Any] ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowercase_ ( self : List[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[int] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = AlbertModel(config=_A ) model.to(_A ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(_A , attention_mask=_A , token_type_ids=_A ) UpperCAmelCase__ : int = model(_A , token_type_ids=_A ) UpperCAmelCase__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowercase_ ( self : Any , _A : Tuple , _A : Optional[Any] , _A : Tuple , _A : str , _A : List[str] , _A : Any , _A : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = AlbertForPreTraining(config=_A ) model.to(_A ) model.eval() UpperCAmelCase__ : int = model( _A , attention_mask=_A , token_type_ids=_A , labels=_A , sentence_order_label=_A , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowercase_ ( self : Optional[Any] , _A : List[str] , _A : int , _A : Any , _A : List[Any] , _A : Tuple , _A : Tuple , _A : Any ): '''simple docstring''' UpperCAmelCase__ : List[Any] = AlbertForMaskedLM(config=_A ) model.to(_A ) model.eval() UpperCAmelCase__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase_ ( self : Optional[int] , _A : str , _A : Dict , _A : Optional[Any] , _A : Optional[int] , _A : Union[str, Any] , _A : List[Any] , _A : Any ): '''simple docstring''' UpperCAmelCase__ : Any = AlbertForQuestionAnswering(config=_A ) model.to(_A ) model.eval() UpperCAmelCase__ : List[str] = model( _A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase_ ( self : Optional[int] , _A : List[Any] , _A : Union[str, Any] , _A : str , _A : Tuple , _A : Optional[Any] , _A : int , _A : Any ): '''simple docstring''' UpperCAmelCase__ : Any = self.num_labels UpperCAmelCase__ : Optional[int] = AlbertForSequenceClassification(_A ) model.to(_A ) model.eval() UpperCAmelCase__ : int = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase_ ( self : Union[str, Any] , _A : str , _A : Dict , _A : Any , _A : str , _A : int , _A : Tuple , _A : int ): '''simple docstring''' UpperCAmelCase__ : Any = self.num_labels UpperCAmelCase__ : List[str] = AlbertForTokenClassification(config=_A ) model.to(_A ) model.eval() UpperCAmelCase__ : str = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.seq_length, self.num_labels) ) def lowercase_ ( self : Tuple , _A : int , _A : Tuple , _A : List[str] , _A : Any , _A : str , _A : Optional[Any] , _A : str ): '''simple docstring''' UpperCAmelCase__ : int = self.num_choices UpperCAmelCase__ : Optional[int] = AlbertForMultipleChoice(config=_A ) model.to(_A ) model.eval() UpperCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Optional[Any] = model( _A , attention_mask=_A , token_type_ids=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Any = config_and_inputs UpperCAmelCase__ : List[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) lowerCAmelCase__ = ( { 'feature-extraction': AlbertModel, 'fill-mask': AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase__ = True def lowercase_ ( self : Optional[int] , _A : int , _A : int , _A : List[str]=False ): '''simple docstring''' UpperCAmelCase__ : str = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if model_class in get_values(_A ): UpperCAmelCase__ : List[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_A ) UpperCAmelCase__ : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_A ) return inputs_dict def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = AlbertModelTester(self ) UpperCAmelCase__ : Optional[Any] = ConfigTester(self , config_class=_A , hidden_size=37 ) def lowercase_ ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_A ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_A ) def lowercase_ ( self : Tuple 
): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase__ : int = type self.model_tester.create_and_check_model(*_A ) @slow def lowercase_ ( self : Optional[int] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Dict = AlbertModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch class lowerCamelCase_ ( unittest.TestCase ): @slow def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = AlbertModel.from_pretrained('''albert-base-v2''' ) UpperCAmelCase__ : Any = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) UpperCAmelCase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase__ : Union[str, Any] = model(_A , attention_mask=_A )[0] UpperCAmelCase__ : List[str] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , _A ) UpperCAmelCase__ : Optional[int] = torch.tensor( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1e-4 ) )
299
'''simple docstring''' import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Any = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() UpperCAmelCase__ : Optional[Any] = dict(zip(_A , range(len(_A ) ) ) ) UpperCAmelCase__ : Tuple = { '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } UpperCAmelCase__ : Optional[int] = { '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 16_000, '''return_attention_mask''': False, '''do_normalize''': True, } UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp() UpperCAmelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase__ : int = os.path.join(self.tmpdirname , _A ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_A ) + '''\n''' ) with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_A ) + '''\n''' ) # load decoder from hub UpperCAmelCase__ : Any = '''hf-internal-testing/ngram-beam-search-decoder''' def lowercase_ ( self : int , **_A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = self.add_kwargs_tokens_map.copy() kwargs.update(_A ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_A ) def lowercase_ ( self : str , **_A : Any ): '''simple docstring''' return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_A ) def lowercase_ ( self : str , **_A : Any ): '''simple docstring''' return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_A ) def lowercase_ ( self : Any ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase__ : Any = self.get_feature_extractor() UpperCAmelCase__ : Tuple = self.get_decoder() UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , _A ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) 
self.assertIsInstance(processor.feature_extractor , _A ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , _A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(_A , '''include''' ): WavaVecaProcessorWithLM( tokenizer=_A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_feature_extractor() UpperCAmelCase__ : Optional[int] = self.get_tokenizer() UpperCAmelCase__ : Any = self.get_decoder() UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : List[Any] = floats_list((3, 1_000) ) UpperCAmelCase__ : Dict = feature_extractor(_A , return_tensors='''np''' ) UpperCAmelCase__ : str = processor(_A , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_feature_extractor() UpperCAmelCase__ : str = self.get_tokenizer() UpperCAmelCase__ : str = self.get_decoder() UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : Union[str, Any] = '''This is a test string''' UpperCAmelCase__ : Optional[int] = processor(text=_A ) UpperCAmelCase__ : List[str] = tokenizer(_A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase_ ( self : Dict , _A : Optional[int]=(2, 10, 16) , _A : List[str]=77 ): '''simple docstring''' np.random.seed(_A ) return np.random.rand(*_A ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_feature_extractor() UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase__ : Optional[Any] = self.get_decoder() UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : int = self._get_dummy_logits(shape=(10, 16) , seed=13 ) UpperCAmelCase__ : List[Any] = processor.decode(_A ) UpperCAmelCase__ : List[Any] = decoder.decode_beams(_A )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual('''</s> <s> </s>''' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score 
) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def lowercase_ ( self : Any , _A : str ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_feature_extractor() UpperCAmelCase__ : Tuple = self.get_tokenizer() UpperCAmelCase__ : Tuple = self.get_decoder() UpperCAmelCase__ : Any = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : Optional[Any] = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(_A ) else: with get_context(_A ).Pool() as pool: UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(_A , _A ) UpperCAmelCase__ : str = list(_A ) with get_context('''fork''' ).Pool() as p: UpperCAmelCase__ : Dict = decoder.decode_beams_batch(_A , _A ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(_A , decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text ) self.assertListEqual(_A , decoded_processor.logit_score ) self.assertListEqual(_A , decoded_processor.lm_score ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.get_feature_extractor() UpperCAmelCase__ : List[Any] = self.get_tokenizer() UpperCAmelCase__ : int = self.get_decoder() UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : str = self._get_dummy_logits() UpperCAmelCase__ : Optional[int] = 15 UpperCAmelCase__ : Dict = -2_0.0 UpperCAmelCase__ : Optional[Any] = -4.0 UpperCAmelCase__ : Union[str, Any] = processor.batch_decode( _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , ) UpperCAmelCase__ : List[Any] = decoded_processor_out.text UpperCAmelCase__ : List[str] = list(_A ) with get_context('''fork''' ).Pool() as pool: UpperCAmelCase__ : Tuple = decoder.decode_beams_batch( _A , _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , ) UpperCAmelCase__ : Optional[int] = [d[0][0] for d in decoded_decoder_out] UpperCAmelCase__ : Optional[Any] = [d[0][2] for d in decoded_decoder_out] UpperCAmelCase__ : Optional[int] = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(_A , _A ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _A ) self.assertTrue(np.array_equal(_A , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , _A , atol=1e-3 ) ) self.assertTrue(np.array_equal(_A , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , _A , atol=1e-3 ) ) def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.get_feature_extractor() UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : Dict = self.get_decoder() UpperCAmelCase__ : int = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : Optional[int] = self._get_dummy_logits() UpperCAmelCase__ : List[str] = 2.0 UpperCAmelCase__ : Union[str, Any] = 5.0 UpperCAmelCase__ : str = -2_0.0 UpperCAmelCase__ : 
Optional[int] = True UpperCAmelCase__ : Union[str, Any] = processor.batch_decode( _A , alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , ) UpperCAmelCase__ : Union[str, Any] = decoded_processor_out.text UpperCAmelCase__ : Tuple = list(_A ) decoder.reset_params( alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , ) with get_context('''fork''' ).Pool() as pool: UpperCAmelCase__ : Optional[Any] = decoder.decode_beams_batch( _A , _A , ) UpperCAmelCase__ : str = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(_A , _A ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _A ) UpperCAmelCase__ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -2_0.0 ) self.assertEqual(lm_model.score_boundary , _A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Dict = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase__ : Optional[int] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() UpperCAmelCase__ : Dict = os.listdir(_A ) UpperCAmelCase__ : Optional[Any] = ['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(_A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : str = snapshot_download('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Any = WavaVecaProcessorWithLM.from_pretrained(_A ) UpperCAmelCase__ : Optional[int] = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase__ : str = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() UpperCAmelCase__ : List[str] = os.listdir(_A ) UpperCAmelCase__ : Any = os.listdir(_A ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(_A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Dict = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Tuple = floats_list((3, 1_000) ) UpperCAmelCase__ : int = processor_wavaveca(_A , return_tensors='''np''' ) UpperCAmelCase__ : List[str] = processor_auto(_A , return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 ) UpperCAmelCase__ : Tuple = self._get_dummy_logits() UpperCAmelCase__ : List[str] = processor_wavaveca.batch_decode(_A ) UpperCAmelCase__ : int = processor_auto.batch_decode(_A ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = self.get_feature_extractor() UpperCAmelCase__ : int = self.get_tokenizer() UpperCAmelCase__ : Optional[Any] = self.get_decoder() UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , 
decoder=_A ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , ) @staticmethod def lowercase_ ( _A : Dict , _A : str ): '''simple docstring''' UpperCAmelCase__ : int = [d[key] for d in offsets] return retrieved_list def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : str = self._get_dummy_logits()[0] UpperCAmelCase__ : List[str] = processor.decode(_A , output_word_offsets=_A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_A , _A ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Dict = self._get_dummy_logits() UpperCAmelCase__ : Dict = processor.batch_decode(_A , output_word_offsets=_A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_A , _A ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(_A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def lowercase_ ( self : Optional[Any] ): '''simple docstring''' import torch UpperCAmelCase__ : Any = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_A ) UpperCAmelCase__ : Dict = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16_000 ) ) UpperCAmelCase__ : List[Any] = iter(_A ) UpperCAmelCase__ : Optional[Any] = next(_A ) UpperCAmelCase__ : Any = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) UpperCAmelCase__ : int = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train UpperCAmelCase__ : int = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values with torch.no_grad(): UpperCAmelCase__ : Dict = model(_A ).logits.cpu().numpy() UpperCAmelCase__ : int = processor.decode(logits[0] , output_word_offsets=_A ) UpperCAmelCase__ : Any = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate UpperCAmelCase__ : Any = [ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': 
d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] UpperCAmelCase__ : int = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , _A ) self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , output.text ) # output times UpperCAmelCase__ : List[Any] = torch.tensor(self.get_from_offsets(_A , '''start_time''' ) ) UpperCAmelCase__ : List[str] = torch.tensor(self.get_from_offsets(_A , '''end_time''' ) ) # fmt: off UpperCAmelCase__ : int = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] ) UpperCAmelCase__ : List[str] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] ) # fmt: on self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) ) self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) )
299
1
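The tests above exercise Wav2Vec2ProcessorWithLM end to end. A hedged sketch of the typical decoding flow follows, using the model id from the slow test; audio_array is an assumed 16 kHz mono numpy array, not something defined in the file.

import torch
from transformers import AutoProcessor, Wav2Vec2ForCTC

processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

inputs = processor(audio_array, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(inputs.input_values).logits.cpu().numpy()

# Unlike the plain CTC tokenizer, the LM-backed processor decodes from the
# full logit matrix (beam search + n-gram LM), not from argmax token ids.
transcription = processor.batch_decode(logits).text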
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy UpperCamelCase__ = logging.get_logger(__name__) class lowerCamelCase_ ( __a ): def __init__( self : Any , _A : int , _A : int , _A : float , **_A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Tuple = feature_size UpperCAmelCase__ : Optional[int] = sampling_rate UpperCAmelCase__ : Tuple = padding_value UpperCAmelCase__ : str = kwargs.pop('''padding_side''' , '''right''' ) UpperCAmelCase__ : List[Any] = kwargs.pop('''return_attention_mask''' , _A ) super().__init__(**_A ) def lowercase_ ( self : Optional[int] , _A : Union[ BatchFeature, List[BatchFeature], Dict[str, BatchFeature], Dict[str, List[BatchFeature]], List[Dict[str, BatchFeature]], ] , _A : Union[bool, str, PaddingStrategy] = True , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : Optional[bool] = None , _A : Optional[Union[str, TensorType]] = None , ): '''simple docstring''' if isinstance(_A , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): UpperCAmelCase__ : List[Any] = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( '''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`''' f""" to this method that includes {self.model_input_names[0]}, but you provided""" f""" {list(processed_features.keys() )}""" ) UpperCAmelCase__ : Optional[Any] = processed_features[self.model_input_names[0]] UpperCAmelCase__ : List[Any] = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(_A ) == 0: if return_attention_mask: UpperCAmelCase__ : Optional[int] = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch UpperCAmelCase__ : Dict = required_input[0] if isinstance(_A , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. UpperCAmelCase__ : Dict = 0 while len(required_input[index] ) == 0: index += 1 if index < len(_A ): UpperCAmelCase__ : str = required_input[index][0] if return_tensors is None: if is_tf_tensor(_A ): UpperCAmelCase__ : Tuple = '''tf''' elif is_torch_tensor(_A ): UpperCAmelCase__ : str = '''pt''' elif isinstance(_A , (int, float, list, tuple, np.ndarray) ): UpperCAmelCase__ : Optional[Any] = '''np''' else: raise ValueError( f"""type of {first_element} unknown: {type(_A )}. 
""" '''Should be one of a python, numpy, pytorch or tensorflow object.''' ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): UpperCAmelCase__ : int = to_numpy(_A ) else: UpperCAmelCase__ : Dict = [to_numpy(_A ) for v in value] # Convert padding_strategy in PaddingStrategy UpperCAmelCase__ : Union[str, Any] = self._get_padding_strategies(padding=_A , max_length=_A ) UpperCAmelCase__ : Optional[int] = processed_features[self.model_input_names[0]] UpperCAmelCase__ : int = len(_A ) if not all(len(_A ) == batch_size for v in processed_features.values() ): raise ValueError('''Some items in the output dictionary have a different batch size than others.''' ) UpperCAmelCase__ : List[str] = [] for i in range(_A ): UpperCAmelCase__ : Union[str, Any] = {k: v[i] for k, v in processed_features.items()} # truncation UpperCAmelCase__ : List[str] = self._truncate( _A , max_length=_A , pad_to_multiple_of=_A , truncation=_A , ) truncated_inputs.append(_A ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length UpperCAmelCase__ : Optional[int] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) UpperCAmelCase__ : Union[str, Any] = PaddingStrategy.MAX_LENGTH UpperCAmelCase__ : List[Any] = {} for i in range(_A ): # padding UpperCAmelCase__ : Union[str, Any] = self._pad( truncated_inputs[i] , max_length=_A , padding_strategy=_A , pad_to_multiple_of=_A , return_attention_mask=_A , ) for key, value in outputs.items(): if key not in batch_outputs: UpperCAmelCase__ : Any = [] if value.dtype is np.dtype(np.floataa ): UpperCAmelCase__ : Dict = value.astype(np.floataa ) batch_outputs[key].append(_A ) return BatchFeature(_A , tensor_type=_A ) def lowercase_ ( self : Union[str, Any] , _A : Union[Dict[str, np.ndarray], BatchFeature] , _A : Optional[int] = None , _A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , _A : Optional[int] = None , _A : Optional[bool] = None , ): '''simple docstring''' UpperCAmelCase__ : Tuple = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: UpperCAmelCase__ : List[Any] = len(_A ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): UpperCAmelCase__ : Optional[int] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of UpperCAmelCase__ : List[Any] = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(_A ) < max_length if return_attention_mask and "attention_mask" not in processed_features: UpperCAmelCase__ : List[str] = np.ones(len(_A ) , dtype=np.intaa ) if needs_to_be_padded: UpperCAmelCase__ : Tuple = max_length - len(_A ) if self.padding_side == "right": if return_attention_mask: UpperCAmelCase__ : Any = np.pad( processed_features['''attention_mask'''] , (0, difference) ) UpperCAmelCase__ : int = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) UpperCAmelCase__ : Dict = np.pad( _A , _A , '''constant''' , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: UpperCAmelCase__ : int = np.pad( processed_features['''attention_mask'''] , (difference, 0) ) UpperCAmelCase__ : Union[str, Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) UpperCAmelCase__ : Tuple = np.pad( _A , _A , '''constant''' , constant_values=self.padding_value ) else: raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) ) return processed_features def 
lowercase_ ( self : List[str] , _A : Union[Dict[str, np.ndarray], BatchFeature] , _A : Optional[int] = None , _A : Optional[int] = None , _A : Optional[bool] = None , ): '''simple docstring''' if not truncation: return processed_features elif truncation and max_length is None: raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' ) UpperCAmelCase__ : List[str] = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): UpperCAmelCase__ : List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of UpperCAmelCase__ : List[Any] = len(_A ) > max_length if needs_to_be_truncated: UpperCAmelCase__ : Tuple = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: UpperCAmelCase__ : Dict = processed_features['''attention_mask'''][:max_length] return processed_features def lowercase_ ( self : List[str] , _A : str=False , _A : Tuple=None ): '''simple docstring''' if padding is not False: if padding is True: UpperCAmelCase__ : List[str] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(_A , _A ): UpperCAmelCase__ : List[str] = PaddingStrategy(_A ) elif isinstance(_A , _A ): UpperCAmelCase__ : List[Any] = padding else: UpperCAmelCase__ : int = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( f"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( '''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use''' ''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' ) return padding_strategy
299
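The class above matches transformers' SequenceFeatureExtractor padding/truncation mix-in (names mangled by the renaming). A minimal sketch of the pad() path through a concrete subclass, assuming a standard transformers install:

import numpy as np
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16_000, padding_value=0.0)
batch = {"input_values": [np.ones(5, dtype=np.float32), np.ones(3, dtype=np.float32)]}

# padding=True maps to PaddingStrategy.LONGEST, so the shorter example is
# right-padded with padding_value up to length 5.
padded = extractor.pad(batch, padding=True, return_tensors="np")
print(padded["input_values"].shape)  # (2, 5)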
# Logistic Regression from scratch

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70_000):
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
299
1
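One thing worth noting: the script fits theta on the two raw iris features with no intercept term. A hedged sketch of the usual extension follows; the helper name is illustrative, not part of the original file.

import numpy as np

def add_intercept(x):
    # Prepend a column of ones so theta[0] acts as a learned bias term.
    return np.c_[np.ones(x.shape[0]), x]

# x_with_bias = add_intercept(x)
# theta = logistic_reg(0.1, x_with_bias, y)  # theta now has x.shape[1] + 1 entries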
'''simple docstring''' import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants UpperCamelCase__ = Mapping[str, np.ndarray] UpperCamelCase__ = Mapping[str, Any] # Is a nested dict. UpperCamelCase__ = 0.01 @dataclasses.dataclass(frozen=__a ) class lowerCamelCase_ : lowerCAmelCase__ = 42 # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. lowerCAmelCase__ = 42 # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. lowerCAmelCase__ = 42 # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. lowerCAmelCase__ = 42 # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. lowerCAmelCase__ = 42 # [num_res, num_atom_type] # Chain indices for multi-chain predictions lowerCAmelCase__ = None # Optional remark about the protein. Included as a comment in output PDB # files lowerCAmelCase__ = None # Templates used to generate this protein (prediction-only) lowerCAmelCase__ = None # Chain corresponding to each parent lowerCAmelCase__ = None def a__ ( lowerCAmelCase__ ) -> Protein: UpperCAmelCase__ : str = R'''(\[[A-Z]+\]\n)''' UpperCAmelCase__ : List[str] = [tag.strip() for tag in re.split(lowerCAmelCase__ , lowerCAmelCase__ ) if len(lowerCAmelCase__ ) > 0] UpperCAmelCase__ : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] ) UpperCAmelCase__ : List[str] = ["N", "CA", "C"] UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : Optional[Any] = None UpperCAmelCase__ : int = None for g in groups: if "[PRIMARY]" == g[0]: UpperCAmelCase__ : Dict = g[1][0].strip() for i in range(len(lowerCAmelCase__ ) ): if seq[i] not in residue_constants.restypes: UpperCAmelCase__ : Dict = '''X''' # FIXME: strings are immutable UpperCAmelCase__ : Optional[Any] = np.array( [residue_constants.restype_order.get(lowerCAmelCase__ , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: UpperCAmelCase__ : List[List[float]] = [] for axis in range(3 ): tertiary.append(list(map(lowerCAmelCase__ , g[1][axis].split() ) ) ) UpperCAmelCase__ : Any = np.array(lowerCAmelCase__ ) UpperCAmelCase__ : Any = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(lowerCAmelCase__ ): UpperCAmelCase__ : Tuple = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: UpperCAmelCase__ : Optional[Any] = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) ) UpperCAmelCase__ : int = np.zeros( ( len(lowerCAmelCase__ ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(lowerCAmelCase__ ): UpperCAmelCase__ : str = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=lowerCAmelCase__ , atom_mask=lowerCAmelCase__ , aatype=lowerCAmelCase__ , residue_index=np.arange(len(lowerCAmelCase__ ) ) , b_factors=lowerCAmelCase__ , ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = 0 ) -> List[str]: UpperCAmelCase__ : List[str] = [] UpperCAmelCase__ : Tuple = prot.remark if remark is not None: pdb_headers.append(F"""REMARK {remark}""" ) UpperCAmelCase__ : Tuple = 
prot.parents UpperCAmelCase__ : int = prot.parents_chain_index if parents is not None and parents_chain_index is not None: UpperCAmelCase__ : int = [p for i, p in zip(lowerCAmelCase__ , lowerCAmelCase__ ) if i == chain_id] if parents is None or len(lowerCAmelCase__ ) == 0: UpperCAmelCase__ : Dict = ['''N/A'''] pdb_headers.append(F"""PARENT {" ".join(lowerCAmelCase__ )}""" ) return pdb_headers def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str: UpperCAmelCase__ : List[str] = [] UpperCAmelCase__ : Optional[Any] = pdb_str.split('''\n''' ) UpperCAmelCase__ : List[str] = prot.remark if remark is not None: out_pdb_lines.append(F"""REMARK {remark}""" ) UpperCAmelCase__ : List[List[str]] if prot.parents is not None and len(prot.parents ) > 0: UpperCAmelCase__ : Tuple = [] if prot.parents_chain_index is not None: UpperCAmelCase__ : Dict[str, List[str]] = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(lowerCAmelCase__ ) , [] ) parent_dict[str(lowerCAmelCase__ )].append(lowerCAmelCase__ ) UpperCAmelCase__ : Dict = max([int(lowerCAmelCase__ ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): UpperCAmelCase__ : List[str] = parent_dict.get(str(lowerCAmelCase__ ) , ['''N/A'''] ) parents_per_chain.append(lowerCAmelCase__ ) else: parents_per_chain.append(list(prot.parents ) ) else: UpperCAmelCase__ : Dict = [['''N/A''']] def make_parent_line(lowerCAmelCase__ ) -> str: return F"""PARENT {" ".join(lowerCAmelCase__ )}""" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) UpperCAmelCase__ : Any = 0 for i, l in enumerate(lowerCAmelCase__ ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(lowerCAmelCase__ ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(lowerCAmelCase__ ): UpperCAmelCase__ : List[str] = parents_per_chain[chain_counter] else: UpperCAmelCase__ : int = ['''N/A'''] out_pdb_lines.append(make_parent_line(lowerCAmelCase__ ) ) return "\n".join(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> str: UpperCAmelCase__ : List[Any] = residue_constants.restypes + ['''X'''] def res_atoa(lowerCAmelCase__ ) -> str: return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' ) UpperCAmelCase__ : Optional[Any] = residue_constants.atom_types UpperCAmelCase__ : List[str] = [] UpperCAmelCase__ : Optional[Any] = prot.atom_mask UpperCAmelCase__ : List[str] = prot.aatype UpperCAmelCase__ : Any = prot.atom_positions UpperCAmelCase__ : Tuple = prot.residue_index.astype(np.intaa ) UpperCAmelCase__ : str = prot.b_factors UpperCAmelCase__ : Optional[int] = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError('''Invalid aatypes.''' ) UpperCAmelCase__ : List[Any] = get_pdb_headers(lowerCAmelCase__ ) if len(lowerCAmelCase__ ) > 0: pdb_lines.extend(lowerCAmelCase__ ) UpperCAmelCase__ : int = aatype.shape[0] UpperCAmelCase__ : List[str] = 1 UpperCAmelCase__ : Optional[Any] = 0 UpperCAmelCase__ : Dict = string.ascii_uppercase UpperCAmelCase__ : Optional[int] = None # Add all atom sites. 
for i in range(lowerCAmelCase__ ): UpperCAmelCase__ : Union[str, Any] = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(lowerCAmelCase__ , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue UpperCAmelCase__ : Tuple = '''ATOM''' UpperCAmelCase__ : Optional[int] = atom_name if len(lowerCAmelCase__ ) == 4 else F""" {atom_name}""" UpperCAmelCase__ : Tuple = '''''' UpperCAmelCase__ : Tuple = '''''' UpperCAmelCase__ : List[Any] = 1.0_0 UpperCAmelCase__ : Optional[Any] = atom_name[0] # Protein supports only C, N, O, S, this works. UpperCAmelCase__ : str = '''''' UpperCAmelCase__ : Optional[int] = '''A''' if chain_index is not None: UpperCAmelCase__ : int = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! UpperCAmelCase__ : Tuple = ( F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}""" F"""{res_name_a:>3} {chain_tag:>1}""" F"""{residue_index[i]:>4}{insertion_code:>1} """ F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}""" F"""{occupancy:>6.2f}{b_factor:>6.2f} """ F"""{element:>2}{charge:>2}""" ) pdb_lines.append(lowerCAmelCase__ ) atom_index += 1 UpperCAmelCase__ : List[str] = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: UpperCAmelCase__ : Dict = True UpperCAmelCase__ : int = chain_index[i + 1] if should_terminate: # Close the chain. UpperCAmelCase__ : Optional[int] = '''TER''' UpperCAmelCase__ : Optional[Any] = ( F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}""" ) pdb_lines.append(lowerCAmelCase__ ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(lowerCAmelCase__ , lowerCAmelCase__ ) ) pdb_lines.append('''END''' ) pdb_lines.append('''''' ) return "\n".join(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> np.ndarray: return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ) -> Protein: return Protein( aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=lowerCAmelCase__ , remark=lowerCAmelCase__ , parents=lowerCAmelCase__ , parents_chain_index=lowerCAmelCase__ , )
299
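A hedged usage sketch for the module above. The a__ functions correspond, by their bodies, to the original module's from_proteinnet_string and to_pdb converters; those names, and this minimal all-alanine Protein, are assumptions.

import numpy as np

n_res = 3
prot = Protein(
    atom_positions=np.zeros((n_res, residue_constants.atom_type_num, 3)),
    atom_mask=np.ones((n_res, residue_constants.atom_type_num)),
    aatype=np.zeros(n_res, dtype=np.int32),  # index 0 == alanine
    residue_index=np.arange(1, n_res + 1),
    b_factors=np.zeros((n_res, residue_constants.atom_type_num)),
)
pdb_str = to_pdb(prot)          # assumed name of the PDB writer defined above
print(pdb_str.splitlines()[0])  # "PARENT N/A" header line, then ATOM records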
'''simple docstring''' from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'new-model' if is_tf_available(): class lowerCamelCase_ ( __a ): lowerCAmelCase__ = NewModelConfig @require_tf class lowerCamelCase_ ( unittest.TestCase ): @slow def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[str] = '''bert-base-cased''' UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Dict = TFAutoModel.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : str = '''bert-base-cased''' UpperCAmelCase__ : Any = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : List[str] = TFAutoModelForPreTraining.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : int ): '''simple docstring''' for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : str = TFAutoModelForCausalLM.from_pretrained(_A ) UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = TFAutoModelForCausalLM.from_pretrained(_A , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : List[Any] ): '''simple docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[Any] = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : Optional[Any] ): '''simple docstring''' for model_name in 
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_A ) UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_A , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : Optional[int] ): '''simple docstring''' for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(_A ) UpperCAmelCase__ , UpperCAmelCase__ : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(_A , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : Any ): '''simple docstring''' for model_name in ["bert-base-uncased"]: UpperCAmelCase__ : Any = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Any = TFAutoModelForSequenceClassification.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : Any ): '''simple docstring''' for model_name in ["bert-base-uncased"]: UpperCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Dict = TFAutoModelForQuestionAnswering.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow @require_tensorflow_probability def lowercase_ ( self : Optional[int] ): '''simple docstring''' for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: UpperCAmelCase__ : List[str] = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : List[str] = TFAutoModelForTableQuestionAnswering.from_pretrained(_A ) UpperCAmelCase__ , UpperCAmelCase__ : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained( _A , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A ) self.assertIsInstance(_A , _A ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=_A ) , 14_410 ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A ) self.assertIsInstance(_A , _A ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=_A ) , 14_410 ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : int = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Any = copy.deepcopy(model.config ) UpperCAmelCase__ : Tuple = ['''FunnelBaseModel'''] UpperCAmelCase__ : int = TFAutoModel.from_config(_A ) self.assertIsInstance(_A , _A ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_A ) UpperCAmelCase__ : str = TFAutoModel.from_pretrained(_A ) self.assertIsInstance(_A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' try: AutoConfig.register('''new-model''' , _A ) UpperCAmelCase__ : List[Any] = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, 
TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(_A ): auto_class.register(_A , _A ) auto_class.register(_A , _A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A ): auto_class.register(_A , _A ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCAmelCase__ : Tuple = BertModelTester(self ).get_config() UpperCAmelCase__ : str = NewModelConfig(**tiny_config.to_dict() ) UpperCAmelCase__ : str = auto_class.from_config(_A ) self.assertIsInstance(_A , _A ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_A ) UpperCAmelCase__ : str = auto_class.from_pretrained(_A ) self.assertIsInstance(_A , _A ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def lowercase_ ( self : str ): '''simple docstring''' with self.assertRaisesRegex( _A , '''bert-base is not a local folder and is not a valid model identifier''' ): UpperCAmelCase__ : Dict = TFAutoModel.from_pretrained('''bert-base''' ) def lowercase_ ( self : Tuple ): '''simple docstring''' with self.assertRaisesRegex( _A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): UpperCAmelCase__ : int = TFAutoModel.from_pretrained(_A , revision='''aaaaaa''' ) def lowercase_ ( self : Tuple ): '''simple docstring''' with self.assertRaisesRegex( _A , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ): UpperCAmelCase__ : List[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' with self.assertRaisesRegex(_A , '''Use `from_pt=True` to load this model''' ): UpperCAmelCase__ : int = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' ) def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[str] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: UpperCAmelCase__ : Union[str, Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint UpperCAmelCase__ : Optional[Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) with RequestCounter() as counter: UpperCAmelCase__ : List[Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
299
1
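A sketch of the register-then-use pattern exercised in the test above. NewModelConfig is the config defined at the top of the file; TFNewModel stands for the tiny TF model class whose name was mangled by the renaming, so both usages here are assumptions.

from transformers import AutoConfig, TFAutoModel

AutoConfig.register("new-model", NewModelConfig)
TFAutoModel.register(NewModelConfig, TFNewModel)

config = NewModelConfig()                # now usable like any built-in config
model = TFAutoModel.from_config(config)  # resolves to TFNewModel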
def count_divisors(n):
    # Count divisors of n via its prime factorization:
    # if n = p1^a1 * p2^a2 * ..., the divisor count is (a1+1)(a2+1)...
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:  # a single prime factor larger than sqrt(n) remains
        n_divisors *= 2
    return n_divisors


def solution():
    # Walk triangle numbers until one has more than 500 divisors.
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 5_00:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
299
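A quick sanity check of the divisor-counting helper against brute force, using the names from the cleaned-up listing above:

def count_divisors_naive(n):
    # O(n) reference implementation, fine for small n.
    return sum(1 for d in range(1, n + 1) if n % d == 0)

assert count_divisors(28) == count_divisors_naive(28) == 6  # 1, 2, 4, 7, 14, 28
# solution() walks triangle numbers 1, 3, 6, 10, ... and returns 76576500,
# the first one with more than 500 divisors (Project Euler problem 12).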
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class lowerCamelCase_ ( unittest.TestCase ): def __init__( self : List[str] , _A : List[Any] , _A : Union[str, Any]=7 , _A : List[str]=3 , _A : str=30 , _A : Tuple=400 , _A : Optional[int]=True , _A : List[str]=None , _A : int=True , _A : int=[0.5, 0.5, 0.5] , _A : Optional[int]=[0.5, 0.5, 0.5] , _A : List[Any]=True , _A : str=1 / 255 , _A : Tuple=True , ): '''simple docstring''' UpperCAmelCase__ : str = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333} UpperCAmelCase__ : Optional[Any] = parent UpperCAmelCase__ : Optional[Any] = batch_size UpperCAmelCase__ : List[str] = num_channels UpperCAmelCase__ : List[Any] = min_resolution UpperCAmelCase__ : List[str] = max_resolution UpperCAmelCase__ : Tuple = do_resize UpperCAmelCase__ : Union[str, Any] = size UpperCAmelCase__ : Dict = do_normalize UpperCAmelCase__ : Union[str, Any] = image_mean UpperCAmelCase__ : Optional[int] = image_std UpperCAmelCase__ : Dict = do_rescale UpperCAmelCase__ : Union[str, Any] = rescale_factor UpperCAmelCase__ : int = do_pad def lowercase_ ( self : Any ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def lowercase_ ( self : Any , _A : Union[str, Any] , _A : Union[str, Any]=False ): '''simple docstring''' if not batched: UpperCAmelCase__ : Optional[int] = image_inputs[0] if isinstance(_A , Image.Image ): UpperCAmelCase__ , UpperCAmelCase__ : str = image.size else: UpperCAmelCase__ , UpperCAmelCase__ : int = image.shape[1], image.shape[2] if w < h: UpperCAmelCase__ : Optional[Any] = int(self.size['''shortest_edge'''] * h / w ) UpperCAmelCase__ : List[Any] = self.size['''shortest_edge'''] elif w > h: UpperCAmelCase__ : int = self.size['''shortest_edge'''] UpperCAmelCase__ : Dict = int(self.size['''shortest_edge'''] * w / h ) else: UpperCAmelCase__ : List[str] = self.size['''shortest_edge'''] UpperCAmelCase__ : Dict = self.size['''shortest_edge'''] else: UpperCAmelCase__ : int = [] for image in image_inputs: UpperCAmelCase__ , UpperCAmelCase__ : str = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCAmelCase__ : Union[str, Any] = max(_A , key=lambda _A : item[0] )[0] UpperCAmelCase__ : Union[str, Any] = max(_A , key=lambda _A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = DetaImageProcessor if is_vision_available() else None def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = DetaImageProcessingTester(self ) @property def lowercase_ ( self : int ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''image_mean''' ) ) 
self.assertTrue(hasattr(_A , '''image_std''' ) ) self.assertTrue(hasattr(_A , '''do_normalize''' ) ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''do_rescale''' ) ) self.assertTrue(hasattr(_A , '''do_pad''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} ) self.assertEqual(image_processor.do_pad , _A ) def lowercase_ ( self : Dict ): '''simple docstring''' pass def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input UpperCAmelCase__ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ , UpperCAmelCase__ : str = self.image_processor_tester.get_expected_values(_A , batched=_A ) UpperCAmelCase__ : Union[str, Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input UpperCAmelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ : List[str] = image_processing(_A , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input UpperCAmelCase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, 
expected_width) , ) # Test batched UpperCAmelCase__ : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Any = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: UpperCAmelCase__ : str = json.loads(f.read() ) UpperCAmelCase__ : Tuple = {'''image_id''': 39_769, '''annotations''': target} # encode them UpperCAmelCase__ : Optional[int] = DetaImageProcessor() UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , return_tensors='''pt''' ) # verify pixel values UpperCAmelCase__ : Optional[int] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['''pixel_values'''].shape , _A ) UpperCAmelCase__ : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) ) # verify area UpperCAmelCase__ : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) ) # verify boxes UpperCAmelCase__ : int = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A ) UpperCAmelCase__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) ) # verify image_id UpperCAmelCase__ : str = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) ) # verify is_crowd UpperCAmelCase__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) ) # verify class_labels UpperCAmelCase__ : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) ) # verify orig_size UpperCAmelCase__ : int = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) ) # verify size UpperCAmelCase__ : int = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) ) @slow def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: UpperCAmelCase__ : int = json.loads(f.read() ) UpperCAmelCase__ : str = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target} UpperCAmelCase__ : Dict = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them UpperCAmelCase__ : Any = DetaImageProcessor(format='''coco_panoptic''' ) UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='''pt''' ) # verify pixel values UpperCAmelCase__ : str = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['''pixel_values'''].shape , _A ) UpperCAmelCase__ : Union[str, Any] = 
torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) ) # verify area UpperCAmelCase__ : Any = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) ) # verify boxes UpperCAmelCase__ : Dict = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A ) UpperCAmelCase__ : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) ) # verify image_id UpperCAmelCase__ : Optional[int] = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) ) # verify is_crowd UpperCAmelCase__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) ) # verify class_labels UpperCAmelCase__ : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) ) # verify masks UpperCAmelCase__ : Dict = 822_873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _A ) # verify orig_size UpperCAmelCase__ : str = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) ) # verify size UpperCAmelCase__ : Optional[Any] = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
299
1
'''simple docstring'''
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"""mod inverse of {a!r} and {m!r} does not exist"""
        raise ValueError(msg)
    # extended Euclidean algorithm: track (u1, u2, u3) and (v1, v2, v3)
    # so that u3 == u1 * a + u2 * m holds as an invariant
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
299
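A quick usage check for the modular-inverse helper above; a minimal sketch assuming the reconstructed names `gcd` and `find_mod_inverse`, with Python 3.8+'s built-in `pow(a, -1, m)` as an independent cross-check.

# usage sketch; assumes find_mod_inverse as defined above
a, m = 7, 26
inv = find_mod_inverse(a, m)
assert (a * inv) % m == 1
assert inv == pow(a, -1, m) == 15  # 7 * 15 == 105 == 4 * 26 + 1
print(f"inverse of {a} mod {m} is {inv}")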
'''simple docstring'''
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"""{round(-1 * my_fir_sum):.1f}""")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"""{round(-1 * my_sec_sum):.1f}""")

    # print the difference between them
    print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}""")


def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
299
1
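A small usage sketch for the entropy utilities above (function names as reconstructed): an alternating two-character text has a first-order entropy of about one bit per character.

# usage sketch; assumes analyze_text / calculate_prob as defined above
singles, pairs = analyze_text("abababab")
assert singles["a"] == singles["b"] == 4
calculate_prob("abababab")  # prints 1.0 (first order), the second-order entropy, and their difference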
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = tempfile.mkdtemp() UpperCAmelCase__ : Any = BlipImageProcessor() UpperCAmelCase__ : Optional[int] = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' ) UpperCAmelCase__ : Tuple = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) UpperCAmelCase__ : Optional[Any] = InstructBlipProcessor(_A , _A , _A ) processor.save_pretrained(self.tmpdirname ) def lowercase_ ( self : List[Any] , **_A : List[Any] ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).tokenizer def lowercase_ ( self : Optional[Any] , **_A : Tuple ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).image_processor def lowercase_ ( self : Dict , **_A : Any ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).qformer_tokenizer def lowercase_ ( self : Optional[int] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] UpperCAmelCase__ : Dict = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) UpperCAmelCase__ : Optional[int] = self.get_image_processor(do_normalize=_A , padding_value=1.0 ) UpperCAmelCase__ : Optional[Any] = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _A ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) self.assertIsInstance(processor.qformer_tokenizer , _A ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.get_image_processor() UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase__ : List[str] = self.get_qformer_tokenizer() UpperCAmelCase__ : Tuple = InstructBlipProcessor( tokenizer=_A , image_processor=_A , qformer_tokenizer=_A ) UpperCAmelCase__ : Tuple = self.prepare_image_inputs() UpperCAmelCase__ : Optional[int] = image_processor(_A , return_tensors='''np''' ) UpperCAmelCase__ : Optional[Any] = processor(images=_A , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : int = 
self.get_image_processor() UpperCAmelCase__ : Tuple = self.get_tokenizer() UpperCAmelCase__ : List[Any] = self.get_qformer_tokenizer() UpperCAmelCase__ : Optional[Any] = InstructBlipProcessor( tokenizer=_A , image_processor=_A , qformer_tokenizer=_A ) UpperCAmelCase__ : Any = '''lower newer''' UpperCAmelCase__ : Optional[Any] = processor(text=_A ) UpperCAmelCase__ : List[Any] = tokenizer(_A , return_token_type_ids=_A ) UpperCAmelCase__ : Optional[Any] = qformer_tokenizer(_A , return_token_type_ids=_A ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] ) def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : int = self.get_image_processor() UpperCAmelCase__ : List[Any] = self.get_tokenizer() UpperCAmelCase__ : Union[str, Any] = self.get_qformer_tokenizer() UpperCAmelCase__ : Optional[Any] = InstructBlipProcessor( tokenizer=_A , image_processor=_A , qformer_tokenizer=_A ) UpperCAmelCase__ : Dict = '''lower newer''' UpperCAmelCase__ : Dict = self.prepare_image_inputs() UpperCAmelCase__ : List[str] = processor(text=_A , images=_A ) self.assertListEqual( list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , ) # test if it raises when no input is passed with pytest.raises(_A ): processor() def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.get_image_processor() UpperCAmelCase__ : Any = self.get_tokenizer() UpperCAmelCase__ : Dict = self.get_qformer_tokenizer() UpperCAmelCase__ : Dict = InstructBlipProcessor( tokenizer=_A , image_processor=_A , qformer_tokenizer=_A ) UpperCAmelCase__ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCAmelCase__ : str = processor.batch_decode(_A ) UpperCAmelCase__ : Optional[int] = tokenizer.batch_decode(_A ) self.assertListEqual(_A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.get_image_processor() UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase__ : List[Any] = self.get_qformer_tokenizer() UpperCAmelCase__ : str = InstructBlipProcessor( tokenizer=_A , image_processor=_A , qformer_tokenizer=_A ) UpperCAmelCase__ : Union[str, Any] = '''lower newer''' UpperCAmelCase__ : List[str] = self.prepare_image_inputs() UpperCAmelCase__ : int = processor(text=_A , images=_A ) self.assertListEqual( list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
299
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } UpperCamelCase__ = { '''vocab_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json''' }, '''merges_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt''' }, '''tokenizer_config_file''': { '''facebook/blenderbot_small-90M''': ( '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json''' ) }, } UpperCamelCase__ = { '''facebook/blenderbot_small-90M''': 5_1_2, } class lowerCamelCase_ ( __a ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = BlenderbotSmallTokenizer def __init__( self : List[Any] , _A : List[Any]=None , _A : Optional[Any]=None , _A : Optional[int]="<|endoftext|>" , _A : List[str]="<|endoftext|>" , _A : List[str]="<|endoftext|>" , _A : Any=False , _A : Union[str, Any]=True , **_A : Optional[int] , ): '''simple docstring''' super().__init__( ByteLevelBPETokenizer( vocab=_A , merges=_A , add_prefix_space=_A , trim_offsets=_A , ) , bos_token=_A , eos_token=_A , unk_token=_A , **_A , ) UpperCAmelCase__ : List[Any] = add_prefix_space def lowercase_ ( self : str , _A : Any , _A : Any=None ): '''simple docstring''' UpperCAmelCase__ : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowercase_ ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [self.sep_token_id] UpperCAmelCase__ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
299
1
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): UpperCamelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right UpperCamelCase__ = 1_2_8_0_2_2 UpperCamelCase__ = 1_2_8_0_2_8 @require_sentencepiece class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = MaMaaaTokenizer lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = True def lowercase_ ( self : Tuple ): '''simple docstring''' super().setUp() UpperCAmelCase__ : Union[str, Any] = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>'''] UpperCAmelCase__ : str = dict(zip(_A , range(len(_A ) ) ) ) UpperCAmelCase__ : Union[str, Any] = Path(self.tmpdirname ) save_json(_A , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(_A , save_dir / VOCAB_FILES_NAMES['''spm_file'''] ) UpperCAmelCase__ : int = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase_ ( self : List[Any] , **_A : Union[str, Any] ): '''simple docstring''' return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_A ) def lowercase_ ( self : List[str] , _A : Any ): '''simple docstring''' return ( "This is a test", "This is a test", ) def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = '''</s>''' UpperCAmelCase__ : Any = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_tokenizer() UpperCAmelCase__ : List[Any] = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''</s>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''<s>''' ) self.assertEqual(len(_A ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip('''Skip this test while all models are still to be uploaded.''' ) def lowercase_ ( self : Tuple ): '''simple docstring''' pass def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.get_tokenizer() UpperCAmelCase__ : int = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_A ) , [2, 3, 4, 5, 6] , ) UpperCAmelCase__ : List[str] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) UpperCAmelCase__ : List[str] = tokenizer.convert_tokens_to_string(_A ) self.assertEqual(_A , '''This is a test''' ) @slow def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : int = {'''input_ids''': [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 
18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_A , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , ) @require_torch @require_sentencepiece @require_tokenizers class lowerCamelCase_ ( unittest.TestCase ): lowerCAmelCase__ = 'facebook/m2m100_418M' lowerCAmelCase__ = [ 'In my opinion, there are two levels of response from the French government.', 'NSA Affair Emphasizes Complete Lack of Debate on Intelligence', ] lowerCAmelCase__ = [ 'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.', 'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement', ] # fmt: off lowerCAmelCase__ = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2] @classmethod def lowercase_ ( cls : int ): '''simple docstring''' UpperCAmelCase__ : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' ) UpperCAmelCase__ : Dict = 1 return cls def lowercase_ ( self 
: Tuple ): '''simple docstring''' self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 128_006 ) self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 128_022 ) self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 128_076 ) self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 128_063 ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.tokenizer.get_vocab() self.assertEqual(len(_A ) , self.tokenizer.vocab_size ) self.assertEqual(vocab['''<unk>'''] , 3 ) self.assertIn(self.tokenizer.get_lang_token('''en''' ) , _A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = '''en''' UpperCAmelCase__ : List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , _A ) def lowercase_ ( self : str ): '''simple docstring''' self.assertIn(_A , self.tokenizer.all_special_ids ) # fmt: off UpperCAmelCase__ : Dict = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2] # fmt: on UpperCAmelCase__ : str = self.tokenizer.decode(_A , skip_special_tokens=_A ) UpperCAmelCase__ : Optional[int] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_A ) self.assertEqual(_A , _A ) self.assertNotIn(self.tokenizer.eos_token , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = tempfile.mkdtemp() UpperCAmelCase__ : Union[str, Any] = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(_A ) UpperCAmelCase__ : Any = MaMaaaTokenizer.from_pretrained(_A ) self.assertDictEqual(new_tok.lang_token_to_id , _A ) @require_torch def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = '''en''' UpperCAmelCase__ : Optional[int] = '''fr''' UpperCAmelCase__ : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_A , return_tensors='''pt''' ) UpperCAmelCase__ : Union[str, Any] = shift_tokens_right( batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: UpperCAmelCase__ : int = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = '''mr''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) UpperCAmelCase__ : Optional[int] = '''zh''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = '''mr''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) UpperCAmelCase__ : Optional[Any] 
= '''zh''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Any = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' ) self.assertEqual( nested_simplify(_A ) , { # en_XX, A, test, EOS '''input_ids''': [[128_022, 58, 4_183, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 128_006, } , )
299
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = XLMRobertaTokenizer lowerCAmelCase__ = XLMRobertaTokenizerFast lowerCAmelCase__ = True lowerCAmelCase__ = True def lowercase_ ( self : Dict ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase__ : Union[str, Any] = XLMRobertaTokenizer(_A , keep_accents=_A ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = '''<pad>''' UpperCAmelCase__ : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(_A ) , 1_002 ) def lowercase_ ( self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_002 ) def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = XLMRobertaTokenizer(_A , keep_accents=_A ) UpperCAmelCase__ : int = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) UpperCAmelCase__ : Dict = tokenizer.convert_tokens_to_ids(_A ) self.assertListEqual( _A , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) UpperCAmelCase__ : Optional[int] = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def lowercase_ ( self : str ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return UpperCAmelCase__ : List[str] = (self.rust_tokenizer_class, 
'''hf-internal-testing/tiny-xlm-roberta''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A ) UpperCAmelCase__ : Optional[int] = self.tokenizer_class.from_pretrained(_A , **_A ) UpperCAmelCase__ : List[str] = tempfile.mkdtemp() UpperCAmelCase__ : Any = tokenizer_r.save_pretrained(_A ) UpperCAmelCase__ : Tuple = tokenizer_p.save_pretrained(_A ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) UpperCAmelCase__ : Optional[int] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(_A , _A ) # Checks everything loads correctly in the same way UpperCAmelCase__ : Any = tokenizer_r.from_pretrained(_A ) UpperCAmelCase__ : Dict = tokenizer_p.from_pretrained(_A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_A , _A ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(_A ) # Save tokenizer rust, legacy_format=True UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp() UpperCAmelCase__ : Union[str, Any] = tokenizer_r.save_pretrained(_A , legacy_format=_A ) UpperCAmelCase__ : List[str] = tokenizer_p.save_pretrained(_A ) # Checks it save with the same files self.assertSequenceEqual(_A , _A ) # Checks everything loads correctly in the same way UpperCAmelCase__ : List[str] = tokenizer_r.from_pretrained(_A ) UpperCAmelCase__ : List[str] = tokenizer_p.from_pretrained(_A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_A , _A ) ) shutil.rmtree(_A ) # Save tokenizer rust, legacy_format=False UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp() UpperCAmelCase__ : Dict = tokenizer_r.save_pretrained(_A , legacy_format=_A ) UpperCAmelCase__ : str = tokenizer_p.save_pretrained(_A ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way UpperCAmelCase__ : Union[str, Any] = tokenizer_r.from_pretrained(_A ) UpperCAmelCase__ : Optional[Any] = tokenizer_p.from_pretrained(_A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_A , _A ) ) shutil.rmtree(_A ) @cached_property def lowercase_ ( self : Optional[Any] ): '''simple docstring''' return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' ) def lowercase_ ( self : Any ): '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(_A , f.name ) UpperCAmelCase__ : int = XLMRobertaTokenizer(f.name , keep_accents=_A ) UpperCAmelCase__ : str = pickle.dumps(_A ) pickle.loads(_A ) def lowercase_ ( self : int ): '''simple docstring''' if not self.test_rust_tokenizer: return UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : Union[str, Any] = self.get_rust_tokenizer() UpperCAmelCase__ : Dict = '''I was born in 92000, and this is falsé.''' UpperCAmelCase__ : Dict = tokenizer.tokenize(_A ) UpperCAmelCase__ : List[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) UpperCAmelCase__ : int = 
tokenizer.encode(_A , add_special_tokens=_A ) UpperCAmelCase__ : Optional[Any] = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) UpperCAmelCase__ : Any = self.get_rust_tokenizer() UpperCAmelCase__ : List[Any] = tokenizer.encode(_A ) UpperCAmelCase__ : Union[str, Any] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) @slow def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : str = '''Hello World!''' UpperCAmelCase__ : Tuple = [0, 35_378, 6_661, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(_A , self.big_tokenizer.encode(_A ) ) @slow def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) UpperCAmelCase__ : Any = [ 0, 3_293, 83, 10, 4_552, 4_989, 7_986, 678, 10, 5_915, 111, 179_459, 124_850, 4, 6_044, 237, 12, 6, 5, 6, 4, 6_780, 705, 15, 1_388, 44, 378, 10_114, 711, 152, 20, 6, 5, 22_376, 642, 1_221, 15_190, 34_153, 450, 5_608, 959, 1_119, 57_702, 136, 186, 47, 1_098, 29_367, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6_044, 237, 6_284, 50_901, 528, 31, 90, 34, 927, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(_A , self.big_tokenizer.encode(_A ) ) @slow def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : int = {'''input_ids''': [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_A , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
299
1
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use BeitImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
299
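The shim above is a common deprecation pattern: keep the old class importable, warn once on construction, and forward everything to the replacement. A generic, self-contained sketch of the same idea follows; all names here are illustrative, not from transformers.

import warnings


class NewProcessor:
    def __init__(self, size: int = 224):
        self.size = size


class OldFeatureExtractor(NewProcessor):
    # deprecated alias kept for backward compatibility; forwards to NewProcessor
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


legacy = OldFeatureExtractor(size=384)  # emits a FutureWarning, then behaves like NewProcessor
assert legacy.size == 384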
'''simple docstring'''
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''')
    if not scores:
        raise ValueError('''Scores cannot be empty''')
    if depth == height:
        return scores[node_index]
    # maximizer and minimizer alternate levels, so the child calls flip is_max
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print(f"""Optimal value : {minimax(0, 0, True, scores, height)}""")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
299
1
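A worked call for the minimax routine above (names as reconstructed): with eight leaves the tree has height 3, and alternating max/min levels give 65 for a maximizing first move.

# usage sketch; assumes minimax as defined above
import math

scores = [90, 23, 6, 33, 21, 65, 123, 34_423]  # leaves of a complete binary tree
height = math.log(len(scores), 2)  # 3.0 for 8 leaves
# level 2 maxima: 90, 33, 65, 34423 -> level 1 minima: 33, 65 -> root max: 65
assert minimax(0, 0, True, scores, height) == 65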
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else '''train'''
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
299
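The two abstract bases above follow a store-config-then-read pattern: the constructor only records options, and subclasses implement `read()`. Below is a generic, self-contained sketch of the same idea with illustrative names; it is not the datasets API.

from abc import ABC, abstractmethod
from typing import Optional


class AbstractReader(ABC):
    # illustrative base class: subclasses store config and implement read()
    def __init__(self, path: str, split: Optional[str] = None, **kwargs):
        self.path = path
        self.split = split or "train"
        self.kwargs = kwargs

    @abstractmethod
    def read(self):
        ...


class JsonLinesReader(AbstractReader):
    def read(self):
        import json

        with open(self.path, encoding="utf-8") as f:
            return [json.loads(line) for line in f if line.strip()]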
'''simple docstring'''
class CircularQueue:
    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception('''QUEUE IS FULL''')
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception('''UNDERFLOW''')
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
299
1
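A short usage sketch for the fixed-capacity queue above (class and method names as reconstructed), exercising the wrap-around of front and rear indices.

# usage sketch; assumes CircularQueue as defined above
q = CircularQueue(3)
q.enqueue(1).enqueue(2).enqueue(3)  # enqueue returns self, so calls chain
assert len(q) == 3 and q.first() == 1
assert q.dequeue() == 1
q.enqueue(4)  # the freed slot is reused: indices wrap with modulo n
assert [q.dequeue() for _ in range(3)] == [2, 3, 4]
assert q.is_empty()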
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'scipy'] def __init__( self : List[Any] , *_A : Optional[Any] , **_A : Tuple ): '''simple docstring''' requires_backends(self , ['''torch''', '''scipy'''] ) @classmethod def lowercase_ ( cls : Dict , *_A : Tuple , **_A : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ['''torch''', '''scipy'''] ) @classmethod def lowercase_ ( cls : List[Any] , *_A : Tuple , **_A : Dict ): '''simple docstring''' requires_backends(cls , ['''torch''', '''scipy'''] )
299
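The dummy class above exists so that importing the library succeeds even when optional backends (here torch and scipy) are missing, deferring the error to first use. Here is a generic, self-contained sketch of that guard pattern; all names are illustrative and this is not the transformers implementation.

import importlib.util


def requires_backend(cls_or_obj, backend: str) -> None:
    # raise a descriptive ImportError when the named backend is not importable
    if importlib.util.find_spec(backend) is None:
        name = getattr(cls_or_obj, "__name__", type(cls_or_obj).__name__)
        raise ImportError(f"{name} requires `{backend}`; install it with `pip install {backend}`")


class ScipyOnlyModel:
    _backends = ["scipy"]

    def __init__(self, *args, **kwargs):
        for backend in self._backends:
            requires_backend(self, backend)

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        for backend in cls._backends:
            requires_backend(cls, backend)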
'''simple docstring'''
def selection_sort(collection):
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            # swap the smallest remaining element into position i
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(selection_sort(unsorted))
299
1
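Quick property checks for `selection_sort` as fixed above; the algorithm does O(n^2) comparisons but at most n - 1 swaps, the usual reason to prefer it when writes are expensive.

# sanity checks; assumes selection_sort as defined above
assert selection_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]
assert selection_sort([]) == []
assert selection_sort([7]) == [7]
assert selection_sort([2, 2, 1]) == [1, 2, 2]
print("selection_sort checks passed")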
'''simple docstring''' from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean UpperCamelCase__ = 0 UpperCamelCase__ = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] UpperCamelCase__ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right UpperCamelCase__ = tuple[int, int] class lowerCamelCase_ : def __init__( self : int , _A : int , _A : int , _A : int , _A : int , _A : int , _A : Node | None , ): '''simple docstring''' UpperCAmelCase__ : List[str] = pos_x UpperCAmelCase__ : Dict = pos_y UpperCAmelCase__ : List[str] = (pos_y, pos_x) UpperCAmelCase__ : List[str] = goal_x UpperCAmelCase__ : List[Any] = goal_y UpperCAmelCase__ : int = g_cost UpperCAmelCase__ : str = parent UpperCAmelCase__ : Optional[Any] = self.calculate_heuristic() UpperCAmelCase__ : str = self.g_cost + self.h_cost def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.pos_x - self.goal_x UpperCAmelCase__ : Any = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(_A ) + abs(_A ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self : str , _A : Node ): '''simple docstring''' return self.f_cost < other.f_cost class lowerCamelCase_ : def __init__( self : Dict , _A : TPosition , _A : TPosition ): '''simple docstring''' UpperCAmelCase__ : str = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _A ) UpperCAmelCase__ : Optional[int] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , _A ) UpperCAmelCase__ : str = [self.start] UpperCAmelCase__ : list[Node] = [] UpperCAmelCase__ : Any = False def lowercase_ ( self : Optional[Any] ): '''simple docstring''' while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() UpperCAmelCase__ : Union[str, Any] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(_A ) self.closed_nodes.append(_A ) UpperCAmelCase__ : Dict = self.get_successors(_A ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(_A ) else: # retrieve the best current path UpperCAmelCase__ : str = self.open_nodes.pop(self.open_nodes.index(_A ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(_A ) else: self.open_nodes.append(_A ) return [self.start.pos] def lowercase_ ( self : Union[str, Any] , _A : Node ): '''simple docstring''' UpperCAmelCase__ : List[Any] = [] for action in delta: UpperCAmelCase__ : int = parent.pos_x + action[1] UpperCAmelCase__ : Tuple = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_A ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( _A , _A , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _A , ) ) return successors def lowercase_ ( self : str , _A : Node | None ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = node UpperCAmelCase__ : Optional[Any] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) UpperCAmelCase__ : str = current_node.parent path.reverse() return path class lowerCamelCase_ : def __init__( self : Dict , _A : TPosition , _A : TPosition ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = AStar(_A , _A ) UpperCAmelCase__ : List[Any] = AStar(_A , _A ) UpperCAmelCase__ : int = False def lowercase_ ( self : int ): '''simple 
docstring''' while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() UpperCAmelCase__ : str = self.fwd_astar.open_nodes.pop(0 ) UpperCAmelCase__ : int = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( _A , _A ) self.fwd_astar.closed_nodes.append(_A ) self.bwd_astar.closed_nodes.append(_A ) UpperCAmelCase__ : List[Any] = current_bwd_node UpperCAmelCase__ : Any = current_fwd_node UpperCAmelCase__ : Any = { self.fwd_astar: self.fwd_astar.get_successors(_A ), self.bwd_astar: self.bwd_astar.get_successors(_A ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(_A ) else: # retrieve the best current path UpperCAmelCase__ : List[str] = astar.open_nodes.pop( astar.open_nodes.index(_A ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(_A ) else: astar.open_nodes.append(_A ) return [self.fwd_astar.start.pos] def lowercase_ ( self : Any , _A : Node , _A : Node ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.fwd_astar.retrace_path(_A ) UpperCAmelCase__ : List[Any] = self.bwd_astar.retrace_path(_A ) bwd_path.pop() bwd_path.reverse() UpperCAmelCase__ : Dict = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] UpperCamelCase__ = (0, 0) UpperCamelCase__ = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) UpperCamelCase__ = time.time() UpperCamelCase__ = AStar(init, goal) UpperCamelCase__ = a_star.search() UpperCamelCase__ = time.time() - start_time print(F"""AStar execution time = {end_time:f} seconds""") UpperCamelCase__ = time.time() UpperCamelCase__ = BidirectionalAStar(init, goal) UpperCamelCase__ = time.time() - bd_start_time print(F"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
299
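The grid search above re-sorts its whole open list on every iteration. As a compact companion, here is a self-contained grid A* sketch that uses a `heapq` priority queue instead; the names and the small demo grid are illustrative, not the code above.

from __future__ import annotations

import heapq


def astar(grid: list[list[int]], start: tuple[int, int], goal: tuple[int, int]) -> list[tuple[int, int]] | None:
    rows, cols = len(grid), len(grid[0])
    open_heap = [(0, start)]  # entries are (f = g + h, position)
    g = {start: 0}
    parent: dict[tuple[int, int], tuple[int, int] | None] = {start: None}
    while open_heap:
        _, pos = heapq.heappop(open_heap)
        if pos == goal:
            path = []
            node: tuple[int, int] | None = pos
            while node is not None:
                path.append(node)
                node = parent[node]
            return path[::-1]
        y, x = pos
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):  # up, left, down, right
            ny, nx = y + dy, x + dx
            if 0 <= ny < rows and 0 <= nx < cols and grid[ny][nx] == 0:
                ng = g[pos] + 1
                if ng < g.get((ny, nx), float("inf")):
                    g[(ny, nx)] = ng
                    parent[(ny, nx)] = pos
                    h = abs(goal[0] - ny) + abs(goal[1] - nx)  # Manhattan heuristic
                    heapq.heappush(open_heap, (ng + h, (ny, nx)))
    return None


demo = [[0, 0, 0], [1, 1, 0], [0, 0, 0]]  # 1s are obstacles
assert astar(demo, (0, 0), (2, 0)) == [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]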
'''simple docstring''' from collections.abc import Iterable from typing import Any class lowerCamelCase_ : def __init__( self : List[Any] , _A : int | None = None ): '''simple docstring''' UpperCAmelCase__ : List[Any] = value UpperCAmelCase__ : Node | None = None # Added in order to delete a node easier UpperCAmelCase__ : Node | None = None UpperCAmelCase__ : Node | None = None def __repr__( self : Optional[Any] ): '''simple docstring''' from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 ) class lowerCamelCase_ : def __init__( self : Optional[Any] , _A : Node | None = None ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = root def __str__( self : Union[str, Any] ): '''simple docstring''' return str(self.root ) def lowercase_ ( self : str , _A : Node , _A : Node | None ): '''simple docstring''' if new_children is not None: # reset its kids UpperCAmelCase__ : Dict = node.parent if node.parent is not None: # reset its parent if self.is_right(_A ): # If it is the right children UpperCAmelCase__ : str = new_children else: UpperCAmelCase__ : Optional[int] = new_children else: UpperCAmelCase__ : Union[str, Any] = new_children def lowercase_ ( self : Union[str, Any] , _A : Node ): '''simple docstring''' if node.parent and node.parent.right: return node == node.parent.right return False def lowercase_ ( self : int ): '''simple docstring''' return self.root is None def lowercase_ ( self : List[str] , _A : Any ): '''simple docstring''' UpperCAmelCase__ : Dict = Node(_A ) # create a new Node if self.empty(): # if Tree is empty UpperCAmelCase__ : List[Any] = new_node # set its root else: # Tree is not empty UpperCAmelCase__ : str = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: UpperCAmelCase__ : Optional[Any] = new_node # We insert the new node in a leaf break else: UpperCAmelCase__ : Any = parent_node.left else: if parent_node.right is None: UpperCAmelCase__ : str = new_node break else: UpperCAmelCase__ : List[str] = parent_node.right UpperCAmelCase__ : Tuple = parent_node def lowercase_ ( self : Optional[Any] , *_A : Tuple ): '''simple docstring''' for value in values: self.__insert(_A ) def lowercase_ ( self : Union[str, Any] , _A : int ): '''simple docstring''' if self.empty(): raise IndexError('''Warning: Tree is empty! 
please use another.''' ) else: UpperCAmelCase__ : List[Any] = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: UpperCAmelCase__ : str = node.left if value < node.value else node.right return node def lowercase_ ( self : List[Any] , _A : Node | None = None ): '''simple docstring''' if node is None: if self.root is None: return None UpperCAmelCase__ : int = self.root if not self.empty(): while node.right is not None: UpperCAmelCase__ : Tuple = node.right return node def lowercase_ ( self : List[Any] , _A : Node | None = None ): '''simple docstring''' if node is None: UpperCAmelCase__ : Optional[int] = self.root if self.root is None: return None if not self.empty(): UpperCAmelCase__ : Optional[int] = self.root while node.left is not None: UpperCAmelCase__ : Tuple = node.left return node def lowercase_ ( self : List[Any] , _A : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.search(_A ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(_A , _A ) elif node.left is None: # Has only right children self.__reassign_nodes(_A , node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(_A , node.left ) else: UpperCAmelCase__ : Union[str, Any] = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore UpperCAmelCase__ : Optional[Any] = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def lowercase_ ( self : List[str] , _A : Node | None ): '''simple docstring''' if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def lowercase_ ( self : str , _A : Any=None ): '''simple docstring''' if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def lowercase_ ( self : Dict , _A : list , _A : Node | None ): '''simple docstring''' if node: self.inorder(_A , node.left ) arr.append(node.value ) self.inorder(_A , node.right ) def lowercase_ ( self : Optional[Any] , _A : int , _A : Node ): '''simple docstring''' UpperCAmelCase__ : list[int] = [] self.inorder(_A , _A ) # append all values to list using inorder traversal return arr[k - 1] def a__ ( lowerCAmelCase__ ) -> list[Node]: UpperCAmelCase__ : Union[str, Any] = [] if curr_node is not None: UpperCAmelCase__ : str = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def a__ ( ) -> None: UpperCAmelCase__ : List[Any] = (8, 3, 6, 1, 10, 14, 13, 4, 7) UpperCAmelCase__ : str = BinarySearchTree() for i in testlist: t.insert(lowerCAmelCase__ ) # Prints all the elements of the list in order traversal print(lowerCAmelCase__ ) if t.search(6 ) is not None: print('''The value 6 exists''' ) else: print('''The value 6 doesn\'t exist''' ) if t.search(-1 ) is not None: print('''The value -1 exists''' ) else: print('''The value -1 doesn\'t exist''' ) if not t.empty(): print('''Max Value: ''' , t.get_max().value ) # type: ignore print('''Min Value: ''' , t.get_min().value ) # type: ignore for i in testlist: t.remove(lowerCAmelCase__ ) print(lowerCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
299
1
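A minimal stand-alone sketch of the same binary-search-tree operations, using the test values from the demo above; the names are illustrative. Insert walks down to a leaf, and search short-circuits left or right by comparison.

from __future__ import annotations


class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


def insert(root: Node | None, value: int) -> Node:
    if root is None:
        return Node(value)
    if value < root.value:
        root.left = insert(root.left, value)
    else:
        root.right = insert(root.right, value)
    return root


def search(root: Node | None, value: int) -> Node | None:
    while root is not None and root.value != value:
        root = root.left if value < root.value else root.right
    return root


root = None
for v in (8, 3, 6, 1, 10, 14, 13, 4, 7):
    root = insert(root, v)
assert search(root, 6) is not None and search(root, -1) is None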
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers import ( TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, BertConfig, DPRConfig, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) class lowerCamelCase_ : def __init__( self : Optional[Any] , _A : List[str] , _A : List[str]=13 , _A : Tuple=7 , _A : str=True , _A : Any=True , _A : Any=True , _A : Optional[Any]=True , _A : Union[str, Any]=99 , _A : Dict=32 , _A : Optional[Any]=2 , _A : Optional[int]=4 , _A : Optional[int]=37 , _A : str="gelu" , _A : Union[str, Any]=0.1 , _A : str=0.1 , _A : Union[str, Any]=512 , _A : int=16 , _A : Optional[Any]=2 , _A : int=0.0_2 , _A : Optional[Any]=3 , _A : Union[str, Any]=4 , _A : List[Any]=None , _A : List[Any]=0 , ): '''simple docstring''' UpperCAmelCase__ : Tuple = parent UpperCAmelCase__ : List[Any] = batch_size UpperCAmelCase__ : Optional[int] = seq_length UpperCAmelCase__ : int = is_training UpperCAmelCase__ : Optional[Any] = use_input_mask UpperCAmelCase__ : str = use_token_type_ids UpperCAmelCase__ : str = use_labels UpperCAmelCase__ : Union[str, Any] = vocab_size UpperCAmelCase__ : Optional[int] = hidden_size UpperCAmelCase__ : Union[str, Any] = num_hidden_layers UpperCAmelCase__ : Any = num_attention_heads UpperCAmelCase__ : str = intermediate_size UpperCAmelCase__ : Union[str, Any] = hidden_act UpperCAmelCase__ : List[Any] = hidden_dropout_prob UpperCAmelCase__ : List[str] = attention_probs_dropout_prob UpperCAmelCase__ : str = max_position_embeddings UpperCAmelCase__ : Tuple = type_vocab_size UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size UpperCAmelCase__ : List[str] = initializer_range UpperCAmelCase__ : Optional[Any] = num_labels UpperCAmelCase__ : Any = num_choices UpperCAmelCase__ : Dict = scope UpperCAmelCase__ : Tuple = projection_dim def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ : Any = None if self.use_input_mask: # follow test_modeling_tf_ctrl.py UpperCAmelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : List[Any] = None if self.use_token_type_ids: UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase__ : Any = None UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : Optional[Any] = None if self.use_labels: UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ : Tuple = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , ) UpperCAmelCase__ : List[Any] = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase_ ( self : str , _A : str , _A : str , _A : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : Tuple , _A : Any ): '''simple docstring''' UpperCAmelCase__ : Any = TFDPRContextEncoder(config=_A ) UpperCAmelCase__ : int = model(_A , attention_mask=_A , token_type_ids=_A ) UpperCAmelCase__ : Optional[Any] = model(_A , token_type_ids=_A ) UpperCAmelCase__ : List[str] = model(_A ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def lowercase_ ( self : Optional[int] , _A : str , _A : Union[str, Any] , _A : Optional[int] , _A : Any , _A : Dict , _A : str , _A : str ): '''simple docstring''' UpperCAmelCase__ : Tuple = TFDPRQuestionEncoder(config=_A ) UpperCAmelCase__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A ) UpperCAmelCase__ : Optional[int] = model(_A , token_type_ids=_A ) UpperCAmelCase__ : Any = model(_A ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def lowercase_ ( self : Union[str, Any] , _A : str , _A : Tuple , _A : str , _A : Union[str, Any] , _A : Union[str, Any] , _A : Union[str, Any] , _A : int ): '''simple docstring''' UpperCAmelCase__ : Tuple = TFDPRReader(config=_A ) UpperCAmelCase__ : Optional[Any] = model(_A , attention_mask=_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : int = config_and_inputs UpperCAmelCase__ : int = {'''input_ids''': input_ids} return config, inputs_dict @require_tf class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = ( ( TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) if is_tf_available() else () ) lowerCAmelCase__ = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {} lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = TFDPRModelTester(self ) UpperCAmelCase__ : Optional[int] = ConfigTester(self , config_class=_A , hidden_size=37 ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_context_encoder(*_A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_question_encoder(*_A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ 
: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_reader(*_A ) @slow def lowercase_ ( self : Dict ): '''simple docstring''' for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[Any] = TFDPRContextEncoder.from_pretrained(_A ) self.assertIsNotNone(_A ) for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : int = TFDPRContextEncoder.from_pretrained(_A ) self.assertIsNotNone(_A ) for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[Any] = TFDPRQuestionEncoder.from_pretrained(_A ) self.assertIsNotNone(_A ) for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Optional[int] = TFDPRReader.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_tf class lowerCamelCase_ ( unittest.TestCase ): @slow def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : str = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' ) UpperCAmelCase__ : Optional[int] = tf.constant( [[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]] ) # [CLS] hello, is my dog cute? [SEP] UpperCAmelCase__ : List[Any] = model(_A )[0] # embedding shape = (1, 768) # compare the actual values for a slice. UpperCAmelCase__ : List[Any] = tf.constant( [ [ 0.0_3_2_3_6_2_5_3, 0.1_2_7_5_3_3_3_5, 0.1_6_8_1_8_5_0_9, 0.0_0_2_7_9_7_8_6, 0.3_8_9_6_9_3_3, 0.2_4_2_6_4_9_4_5, 0.2_1_7_8_9_7_1, -0.0_2_3_3_5_2_2_7, -0.0_8_4_8_1_9_5_9, -0.1_4_3_2_4_1_1_7, ] ] ) self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
299
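These tests exercise the pooled embedding of the TF DPR encoders. A sketch of the inference path the final integration test checks, assuming the pretrained checkpoint is reachable:

from transformers import DPRQuestionEncoderTokenizer, TFDPRQuestionEncoder

tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
    "facebook/dpr-question_encoder-single-nq-base"
)
model = TFDPRQuestionEncoder.from_pretrained(
    "facebook/dpr-question_encoder-single-nq-base"
)

inputs = tokenizer("hello, is my dog cute?", return_tensors="tf")
embedding = model(**inputs).pooler_output  # shape (1, 768)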
'''simple docstring''' import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger('''transformers.models.speecht5''') UpperCamelCase__ = { '''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''', '''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''', '''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''', '''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''', } UpperCamelCase__ = { '''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''', '''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''', } UpperCamelCase__ = { '''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''', '''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''', '''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''', '''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''', '''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''', } UpperCamelCase__ = { '''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''', '''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''', '''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''', '''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''', '''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''', '''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''', '''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''', '''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''', '''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''', '''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''', '''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''', '''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''', } UpperCamelCase__ = { '''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''', } UpperCamelCase__ = { '''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''', } UpperCamelCase__ = { '''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''', '''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''', '''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''', '''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''', '''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''', '''encoder.layers.*.fc1''': 
'''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''', '''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''', '''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''', '''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''', } UpperCamelCase__ = { '''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''', '''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''', '''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''', '''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''', '''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''', '''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''', '''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''', '''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''', '''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''', '''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''', '''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''', '''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''', '''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''', } UpperCamelCase__ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } UpperCamelCase__ = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCamelCase__ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCamelCase__ = [] UpperCamelCase__ = [ '''encoder.version''', '''encoder.layers.*.norm_k.weight''', '''encoder.layers.*.norm_k.bias''', '''decoder.version''', '''decoder.layers.*.norm_k.weight''', '''decoder.layers.*.norm_k.bias''', '''decoder.pos_emb.pe_k''', '''speech_encoder_prenet.embed_positions._float_tensor''', '''text_decoder_prenet.embed_positions._float_tensor''', ] UpperCamelCase__ = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''speech_decoder_prenet.*''', '''speech_decoder_postnet.*''', ] UpperCamelCase__ = IGNORE_KEYS + [ '''encoder.proj''', '''speech_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] UpperCamelCase__ = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: for attribute in key.split('''.''' ): UpperCAmelCase__ : Optional[int] = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) if weight_type is not None: UpperCAmelCase__ : List[str] = getattr(lowerCAmelCase__ , 
lowerCAmelCase__ ).shape else: UpperCAmelCase__ : Any = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCAmelCase__ : Union[str, Any] = value elif weight_type == "weight_g": UpperCAmelCase__ : Tuple = value elif weight_type == "weight_v": UpperCAmelCase__ : List[Any] = value elif weight_type == "bias": UpperCAmelCase__ : int = value elif weight_type == "running_mean": UpperCAmelCase__ : int = value elif weight_type == "running_var": UpperCAmelCase__ : Union[str, Any] = value elif weight_type == "num_batches_tracked": UpperCAmelCase__ : List[Any] = value else: UpperCAmelCase__ : Union[str, Any] = value logger.info(F"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]: for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." in key: UpperCAmelCase__ , UpperCAmelCase__ : int = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]: UpperCAmelCase__ : int = [] if task == "s2t": UpperCAmelCase__ : Optional[Any] = hf_model.speechta.encoder.prenet.feature_encoder UpperCAmelCase__ : List[Any] = MAPPING_S2T UpperCAmelCase__ : int = IGNORE_KEYS_S2T elif task == "t2s": UpperCAmelCase__ : List[str] = None UpperCAmelCase__ : Tuple = MAPPING_T2S UpperCAmelCase__ : Union[str, Any] = IGNORE_KEYS_T2S elif task == "s2s": UpperCAmelCase__ : Optional[int] = hf_model.speechta.encoder.prenet.feature_encoder UpperCAmelCase__ : Tuple = MAPPING_S2S UpperCAmelCase__ : int = IGNORE_KEYS_S2S else: raise ValueError(F"""Unsupported task: {task}""" ) for name, value in fairseq_dict.items(): if should_ignore(lowerCAmelCase__ , lowerCAmelCase__ ): logger.info(F"""{name} was ignored""" ) continue UpperCAmelCase__ : List[Any] = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , ) UpperCAmelCase__ : Tuple = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." 
+ mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = key.split('''.*.''' ) if prefix in name and suffix in name: UpperCAmelCase__ : List[str] = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: UpperCAmelCase__ : Optional[int] = True if "*" in mapped_key: UpperCAmelCase__ : Any = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2] UpperCAmelCase__ : Union[str, Any] = mapped_key.replace('''*''' , lowerCAmelCase__ ) if "weight_g" in name: UpperCAmelCase__ : Dict = '''weight_g''' elif "weight_v" in name: UpperCAmelCase__ : Union[str, Any] = '''weight_v''' elif "bias" in name: UpperCAmelCase__ : Optional[int] = '''bias''' elif "weight" in name: UpperCAmelCase__ : Optional[int] = '''weight''' elif "running_mean" in name: UpperCAmelCase__ : Optional[int] = '''running_mean''' elif "running_var" in name: UpperCAmelCase__ : List[Any] = '''running_var''' elif "num_batches_tracked" in name: UpperCAmelCase__ : Optional[Any] = '''num_batches_tracked''' else: UpperCAmelCase__ : Union[str, Any] = None set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) continue if not is_used: unused_weights.append(lowerCAmelCase__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str: UpperCAmelCase__ : Optional[int] = full_name.split('''conv_layers.''' )[-1] UpperCAmelCase__ : Optional[Any] = name.split('''.''' ) UpperCAmelCase__ : Any = int(items[0] ) UpperCAmelCase__ : Optional[int] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) UpperCAmelCase__ : Any = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) UpperCAmelCase__ : int = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) UpperCAmelCase__ : List[str] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) UpperCAmelCase__ : Union[str, Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowerCAmelCase__ ) @torch.no_grad() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , 
lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> Any: if config_path is not None: UpperCAmelCase__ : Optional[Any] = SpeechTaConfig.from_pretrained(lowerCAmelCase__ ) else: UpperCAmelCase__ : str = SpeechTaConfig() if task == "s2t": UpperCAmelCase__ : str = config.max_text_positions UpperCAmelCase__ : List[str] = SpeechTaForSpeechToText(lowerCAmelCase__ ) elif task == "t2s": UpperCAmelCase__ : Tuple = 18_76 UpperCAmelCase__ : int = 6_00 UpperCAmelCase__ : Union[str, Any] = config.max_speech_positions UpperCAmelCase__ : Optional[Any] = SpeechTaForTextToSpeech(lowerCAmelCase__ ) elif task == "s2s": UpperCAmelCase__ : Tuple = 18_76 UpperCAmelCase__ : Optional[Any] = config.max_speech_positions UpperCAmelCase__ : Dict = SpeechTaForSpeechToSpeech(lowerCAmelCase__ ) else: raise ValueError(F"""Unknown task name: {task}""" ) if vocab_path: UpperCAmelCase__ : Tuple = SpeechTaTokenizer(lowerCAmelCase__ , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it UpperCAmelCase__ : Dict = AddedToken('''<mask>''' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) UpperCAmelCase__ : int = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) UpperCAmelCase__ : Optional[Any] = SpeechTaFeatureExtractor() UpperCAmelCase__ : Any = SpeechTaProcessor(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ ) processor.save_pretrained(lowerCAmelCase__ ) UpperCAmelCase__ : List[str] = torch.load(lowerCAmelCase__ ) recursively_load_weights(fairseq_checkpoint['''model'''] , lowerCAmelCase__ , lowerCAmelCase__ ) model.save_pretrained(lowerCAmelCase__ ) if repo_id: print('''Pushing to the hub...''' ) processor.push_to_hub(lowerCAmelCase__ ) model.push_to_hub(lowerCAmelCase__ ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument( '''--task''', default='''s2t''', type=str, help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) UpperCamelCase__ = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
299
1
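The conversion script above rests on two small mechanisms: resolving a dotted parameter path with repeated getattr, and translating fairseq key names through a mapping whose "*" wildcard stands for a layer index. A simplified sketch of both, using the hypothetical helper names set_by_path and translate_key:

from __future__ import annotations

import re

import torch
from torch import nn


def set_by_path(root: nn.Module, dotted_key: str, value: torch.Tensor) -> None:
    # Resolve every segment but the last with getattr, then copy into the leaf tensor.
    *parents, leaf = dotted_key.split(".")
    target = root
    for attr in parents:
        target = getattr(target, attr)
    param = getattr(target, leaf)
    if param.shape != value.shape:
        raise ValueError(f"shape mismatch for {dotted_key}: {param.shape} vs {value.shape}")
    param.data.copy_(value)


def translate_key(name: str, mapping: dict[str, str]) -> str | None:
    # "*" in a mapping key stands for a layer index; carry the index into the target name.
    for src, dst in mapping.items():
        pattern = "^" + re.escape(src).replace(r"\*", r"(\d+)") + "$"
        match = re.match(pattern, name)
        if match:
            if "*" in dst:
                return dst.replace("*", match.group(1))
            return dst
    return None

For example, translate_key("encoder.layers.3.fc1", {"encoder.layers.*.fc1": "encoder.layers.*.ff"}) returns "encoder.layers.3.ff".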
'''simple docstring'''


def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    # subset[i][j] is True when some subset of the first i elements sums to j.
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # A sum of zero can always be formed by taking no elements.
    for i in range(arr_len + 1):
        subset[i][0] = True

    # A non-zero sum cannot be formed from the empty prefix.
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
299
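The table above only ever reads row i - 1 while filling row i, so the same recurrence fits in a single array if the sums are scanned downward. A space-optimized sketch:

def is_sum_subset_1d(arr: list[int], required_sum: int) -> bool:
    # reachable[s] is True when some subset of the elements seen so far sums to s.
    reachable = [False] * (required_sum + 1)
    reachable[0] = True
    for value in arr:
        # Iterate downward so each element contributes to a given sum at most once.
        for s in range(required_sum, value - 1, -1):
            reachable[s] = reachable[s] or reachable[s - value]
    return reachable[required_sum]


assert is_sum_subset_1d([3, 34, 4, 12, 5, 2], 9) is True    # 4 + 5
assert is_sum_subset_1d([3, 34, 4, 12, 5, 2], 30) is False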
'''simple docstring''' import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''label_embs_concat''': '''label_embeddings_concat''', '''mask_emb''': '''masked_spec_embed''', '''spk_proj''': '''speaker_proj''', } UpperCamelCase__ = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', '''label_embeddings_concat''', '''speaker_proj''', '''layer_norm_for_extract''', ] def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]: for attribute in key.split('''.''' ): UpperCAmelCase__ : Optional[int] = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) if weight_type is not None: UpperCAmelCase__ : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape else: UpperCAmelCase__ : Union[str, Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCAmelCase__ : int = value elif weight_type == "weight_g": UpperCAmelCase__ : Dict = value elif weight_type == "weight_v": UpperCAmelCase__ : List[str] = value elif weight_type == "bias": UpperCAmelCase__ : Tuple = value else: UpperCAmelCase__ : Tuple = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : Dict = fairseq_model.state_dict() UpperCAmelCase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase__ : Any = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , ) UpperCAmelCase__ : str = True else: for key, mapped_key in MAPPING.items(): UpperCAmelCase__ : List[str] = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue UpperCAmelCase__ : Optional[int] = True if "*" in mapped_key: UpperCAmelCase__ : str = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2] UpperCAmelCase__ : Optional[int] = mapped_key.replace('''*''' , lowerCAmelCase__ ) if "weight_g" in name: UpperCAmelCase__ : List[str] = '''weight_g''' elif "weight_v" in name: UpperCAmelCase__ : Dict = '''weight_v''' elif "bias" in name: UpperCAmelCase__ : Optional[int] = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase__ : Tuple = '''weight''' else: UpperCAmelCase__ : Optional[Any] = None set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) continue if not is_used: unused_weights.append(lowerCAmelCase__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]: UpperCAmelCase__ : Tuple = full_name.split('''conv_layers.''' )[-1] UpperCAmelCase__ : Optional[Any] = name.split('''.''' ) UpperCAmelCase__ : Union[str, Any] = int(items[0] ) UpperCAmelCase__ : Tuple = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) UpperCAmelCase__ : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) UpperCAmelCase__ : Optional[int] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) UpperCAmelCase__ : List[str] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, 
but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) UpperCAmelCase__ : Optional[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowerCAmelCase__ ) @torch.no_grad() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True ) -> Any: if config_path is not None: UpperCAmelCase__ : Any = UniSpeechSatConfig.from_pretrained(lowerCAmelCase__ ) else: UpperCAmelCase__ : int = UniSpeechSatConfig() UpperCAmelCase__ : Tuple = '''''' if is_finetuned: UpperCAmelCase__ : Optional[int] = UniSpeechSatForCTC(lowerCAmelCase__ ) else: UpperCAmelCase__ : List[Any] = UniSpeechSatForPreTraining(lowerCAmelCase__ ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) UpperCAmelCase__ : Union[str, Any] = model[0].eval() recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ ) hf_wavavec.save_pretrained(lowerCAmelCase__ ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) UpperCamelCase__ = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
299
1
'''simple docstring''' import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class lowerCamelCase_ ( __a , __a ): @register_to_config def __init__( self : str , _A : int = 128 , _A : int = 256 , _A : float = 2_0_0_0.0 , _A : int = 768 , _A : int = 12 , _A : int = 12 , _A : int = 64 , _A : int = 2_048 , _A : float = 0.1 , ): '''simple docstring''' super().__init__() UpperCAmelCase__ : List[Any] = nn.Sequential( nn.Linear(_A , d_model * 4 , bias=_A ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=_A ) , nn.SiLU() , ) UpperCAmelCase__ : int = nn.Embedding(_A , _A ) UpperCAmelCase__ : Tuple = False UpperCAmelCase__ : Any = nn.Linear(_A , _A , bias=_A ) UpperCAmelCase__ : Tuple = nn.Dropout(p=_A ) UpperCAmelCase__ : Dict = nn.ModuleList() for lyr_num in range(_A ): # FiLM conditional T5 decoder UpperCAmelCase__ : Dict = DecoderLayer(d_model=_A , d_kv=_A , num_heads=_A , d_ff=_A , dropout_rate=_A ) self.decoders.append(_A ) UpperCAmelCase__ : Optional[int] = TaLayerNorm(_A ) UpperCAmelCase__ : str = nn.Dropout(p=_A ) UpperCAmelCase__ : Tuple = nn.Linear(_A , _A , bias=_A ) def lowercase_ ( self : Any , _A : Dict , _A : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : int = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def lowercase_ ( self : Tuple , _A : Tuple , _A : Optional[int] , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. UpperCAmelCase__ : List[str] = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) UpperCAmelCase__ : Optional[int] = self.conditioning_emb(_A ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) UpperCAmelCase__ : Dict = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. UpperCAmelCase__ : Union[str, Any] = torch.broadcast_to( torch.arange(_A , device=decoder_input_tokens.device ) , (batch, seq_length) , ) UpperCAmelCase__ : Union[str, Any] = self.position_encoding(_A ) UpperCAmelCase__ : List[Any] = self.continuous_inputs_projection(_A ) inputs += position_encodings UpperCAmelCase__ : Dict = self.dropout(_A ) # decoder: No padding present. UpperCAmelCase__ : str = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
UpperCAmelCase__ : Optional[int] = [(x, self.encoder_decoder_mask(_A , _A )) for x, y in encodings_and_masks] # cross attend style: concat encodings UpperCAmelCase__ : Any = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) UpperCAmelCase__ : List[str] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: UpperCAmelCase__ : Tuple = lyr( _A , conditioning_emb=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , )[0] UpperCAmelCase__ : Optional[int] = self.decoder_norm(_A ) UpperCAmelCase__ : Dict = self.post_dropout(_A ) UpperCAmelCase__ : List[str] = self.spec_out(_A ) return spec_out class lowerCamelCase_ ( nn.Module ): def __init__( self : List[Any] , _A : Dict , _A : Optional[int] , _A : Tuple , _A : List[str] , _A : Union[str, Any] , _A : Optional[int]=1e-6 ): '''simple docstring''' super().__init__() UpperCAmelCase__ : List[str] = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=_A , d_kv=_A , num_heads=_A , dropout_rate=_A ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=_A , d_kv=_A , num_heads=_A , dropout_rate=_A , layer_norm_epsilon=_A , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=_A , d_ff=_A , dropout_rate=_A , layer_norm_epsilon=_A ) ) def lowercase_ ( self : List[str] , _A : List[Any] , _A : Tuple=None , _A : str=None , _A : List[Any]=None , _A : Tuple=None , _A : Any=None , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.layer[0]( _A , conditioning_emb=_A , attention_mask=_A , ) if encoder_hidden_states is not None: UpperCAmelCase__ : Dict = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to( encoder_hidden_states.dtype ) UpperCAmelCase__ : Optional[Any] = self.layer[1]( _A , key_value_states=_A , attention_mask=_A , ) # Apply Film Conditional Feed Forward layer UpperCAmelCase__ : List[Any] = self.layer[-1](_A , _A ) return (hidden_states,) class lowerCamelCase_ ( nn.Module ): def __init__( self : Optional[Any] , _A : str , _A : int , _A : int , _A : int ): '''simple docstring''' super().__init__() UpperCAmelCase__ : str = TaLayerNorm(_A ) UpperCAmelCase__ : Tuple = TaFiLMLayer(in_features=d_model * 4 , out_features=_A ) UpperCAmelCase__ : str = Attention(query_dim=_A , heads=_A , dim_head=_A , out_bias=_A , scale_qk=_A ) UpperCAmelCase__ : Dict = nn.Dropout(_A ) def lowercase_ ( self : Any , _A : Union[str, Any] , _A : Optional[int]=None , _A : Dict=None , ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.layer_norm(_A ) if conditioning_emb is not None: UpperCAmelCase__ : int = self.FiLMLayer(_A , _A ) # Self-attention block UpperCAmelCase__ : Any = self.attention(_A ) UpperCAmelCase__ : int = hidden_states + self.dropout(_A ) return hidden_states class lowerCamelCase_ ( nn.Module ): def __init__( self : List[str] , _A : Dict , _A : Any , _A : List[str] , _A : str , _A : Dict ): '''simple docstring''' super().__init__() UpperCAmelCase__ : Union[str, Any] = Attention(query_dim=_A , heads=_A , dim_head=_A , out_bias=_A , scale_qk=_A ) UpperCAmelCase__ : Any = TaLayerNorm(_A , eps=_A ) UpperCAmelCase__ : Optional[int] = nn.Dropout(_A ) def lowercase_ ( self : Optional[Any] , _A : int , _A : Union[str, Any]=None , _A : Union[str, Any]=None , ): '''simple docstring''' UpperCAmelCase__ : int = self.layer_norm(_A ) UpperCAmelCase__ : str = self.attention( _A , encoder_hidden_states=_A , attention_mask=attention_mask.squeeze(1 ) , ) UpperCAmelCase__ : List[Any] = 
hidden_states + self.dropout(_A ) return layer_output class lowerCamelCase_ ( nn.Module ): def __init__( self : Optional[Any] , _A : Tuple , _A : List[str] , _A : Any , _A : List[str] ): '''simple docstring''' super().__init__() UpperCAmelCase__ : Tuple = TaDenseGatedActDense(d_model=_A , d_ff=_A , dropout_rate=_A ) UpperCAmelCase__ : Optional[int] = TaFiLMLayer(in_features=d_model * 4 , out_features=_A ) UpperCAmelCase__ : int = TaLayerNorm(_A , eps=_A ) UpperCAmelCase__ : Optional[int] = nn.Dropout(_A ) def lowercase_ ( self : Any , _A : Optional[int] , _A : Union[str, Any]=None ): '''simple docstring''' UpperCAmelCase__ : Dict = self.layer_norm(_A ) if conditioning_emb is not None: UpperCAmelCase__ : int = self.film(_A , _A ) UpperCAmelCase__ : Union[str, Any] = self.DenseReluDense(_A ) UpperCAmelCase__ : Optional[Any] = hidden_states + self.dropout(_A ) return hidden_states class lowerCamelCase_ ( nn.Module ): def __init__( self : Any , _A : Dict , _A : List[Any] , _A : List[str] ): '''simple docstring''' super().__init__() UpperCAmelCase__ : List[Any] = nn.Linear(_A , _A , bias=_A ) UpperCAmelCase__ : Tuple = nn.Linear(_A , _A , bias=_A ) UpperCAmelCase__ : Union[str, Any] = nn.Linear(_A , _A , bias=_A ) UpperCAmelCase__ : Tuple = nn.Dropout(_A ) UpperCAmelCase__ : List[Any] = NewGELUActivation() def lowercase_ ( self : str , _A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.act(self.wi_a(_A ) ) UpperCAmelCase__ : Dict = self.wi_a(_A ) UpperCAmelCase__ : int = hidden_gelu * hidden_linear UpperCAmelCase__ : Optional[Any] = self.dropout(_A ) UpperCAmelCase__ : Optional[int] = self.wo(_A ) return hidden_states class lowerCamelCase_ ( nn.Module ): def __init__( self : Union[str, Any] , _A : str , _A : int=1e-6 ): '''simple docstring''' super().__init__() UpperCAmelCase__ : int = nn.Parameter(torch.ones(_A ) ) UpperCAmelCase__ : Union[str, Any] = eps def lowercase_ ( self : List[str] , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=_A ) UpperCAmelCase__ : Any = hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: UpperCAmelCase__ : List[str] = hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class lowerCamelCase_ ( nn.Module ): def lowercase_ ( self : Dict , _A : torch.Tensor ): '''simple docstring''' return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(_A , 3.0 )) )) class lowerCamelCase_ ( nn.Module ): def __init__( self : Tuple , _A : Dict , _A : Optional[Any] ): '''simple docstring''' super().__init__() UpperCAmelCase__ : List[str] = nn.Linear(_A , out_features * 2 , bias=_A ) def lowercase_ ( self : List[str] , _A : List[Any] , _A : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = self.scale_bias(_A ) UpperCAmelCase__ , UpperCAmelCase__ : Tuple = torch.chunk(_A , 2 , -1 ) UpperCAmelCase__ : List[Any] = x * (1 + scale) + shift return x
299
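The conditioning mechanism in the decoder above is feature-wise linear modulation (FiLM): a linear layer maps the conditioning embedding to a per-channel scale and shift, applied as x * (1 + scale) + shift, exactly the formula in the sample's final layer. A minimal sketch, assuming bias-free projection:

import torch
from torch import nn


class FiLM(nn.Module):
    """Feature-wise linear modulation: x -> x * (1 + scale) + shift."""

    def __init__(self, cond_features: int, num_features: int) -> None:
        super().__init__()
        self.scale_bias = nn.Linear(cond_features, num_features * 2, bias=False)

    def forward(self, x: torch.Tensor, conditioning: torch.Tensor) -> torch.Tensor:
        scale, shift = torch.chunk(self.scale_bias(conditioning), 2, dim=-1)
        return x * (1 + scale) + shift


film = FiLM(cond_features=16, num_features=8)
x = torch.randn(2, 10, 8)     # (batch, seq, channels)
cond = torch.randn(2, 1, 16)  # broadcast over the sequence dimension
out = film(x, cond)           # shape (2, 10, 8)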
'''simple docstring''' import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin UpperCamelCase__ = random.Random() if is_torch_available(): import torch def a__ ( lowerCAmelCase__ , lowerCAmelCase__=1.0 , lowerCAmelCase__=None , lowerCAmelCase__=None ) -> Optional[Any]: if rng is None: UpperCAmelCase__ : List[str] = global_rng UpperCAmelCase__ : Optional[Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class lowerCamelCase_ ( unittest.TestCase ): def __init__( self : Any , _A : List[str] , _A : int=7 , _A : Dict=400 , _A : Tuple=2_000 , _A : Optional[int]=1 , _A : List[Any]=0.0 , _A : Any=16_000 , _A : int=True , _A : str=True , ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = parent UpperCAmelCase__ : str = batch_size UpperCAmelCase__ : Dict = min_seq_length UpperCAmelCase__ : str = max_seq_length UpperCAmelCase__ : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCAmelCase__ : Optional[Any] = feature_size UpperCAmelCase__ : int = padding_value UpperCAmelCase__ : int = sampling_rate UpperCAmelCase__ : Tuple = return_attention_mask UpperCAmelCase__ : str = do_normalize def lowercase_ ( self : Optional[int] ): '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowercase_ ( self : int , _A : Optional[Any]=False , _A : Any=False ): '''simple docstring''' def _flatten(_A : Union[str, Any] ): return list(itertools.chain(*_A ) ) if equal_length: UpperCAmelCase__ : Tuple = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size UpperCAmelCase__ : Optional[int] = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: UpperCAmelCase__ : Dict = [np.asarray(_A ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = ASTFeatureExtractor def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : int = ASTFeatureExtractionTester(self ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 UpperCAmelCase__ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] UpperCAmelCase__ : List[Any] = [np.asarray(_A ) for speech_input in speech_inputs] # Test not batched input UpperCAmelCase__ : str = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values UpperCAmelCase__ : List[Any] = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) ) # Test batched UpperCAmelCase__ : Optional[Any] = feat_extract(_A , padding=_A , return_tensors='''np''' ).input_values UpperCAmelCase__ : Optional[int] = feat_extract(_A , padding=_A , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_A , _A ): 
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. UpperCAmelCase__ : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)] UpperCAmelCase__ : Any = np.asarray(_A ) UpperCAmelCase__ : int = feat_extract(_A , return_tensors='''np''' ).input_values UpperCAmelCase__ : List[str] = feat_extract(_A , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_A , _A ): self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) ) @require_torch def lowercase_ ( self : List[str] ): '''simple docstring''' import torch UpperCAmelCase__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCAmelCase__ : Any = np.random.rand(100 ).astype(np.floataa ) UpperCAmelCase__ : int = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: UpperCAmelCase__ : str = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) UpperCAmelCase__ : Any = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def lowercase_ ( self : int , _A : List[Any] ): '''simple docstring''' from datasets import load_dataset UpperCAmelCase__ : Tuple = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech UpperCAmelCase__ : List[Any] = ds.sort('''id''' ).select(range(_A ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] @require_torch def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Any = torch.tensor( [-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6, -1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3, -1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6, -0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] ) # fmt: on UpperCAmelCase__ : Optional[Any] = self._load_datasamples(1 ) UpperCAmelCase__ : Optional[int] = ASTFeatureExtractor() UpperCAmelCase__ : Dict = feature_extractor(_A , return_tensors='''pt''' ).input_values self.assertEquals(input_values.shape , (1, 1_024, 128) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , _A , atol=1e-4 ) )
299
1
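The integration test above expects a (1, 1024, 128) log-mel spectrogram from one second of 16 kHz audio. A usage sketch of the extractor's default behavior, assuming torch and torchaudio are installed:

import numpy as np
from transformers import ASTFeatureExtractor

extractor = ASTFeatureExtractor()  # defaults: 16 kHz input, 128 mel bins, 1024 frames
waveform = np.random.randn(16_000).astype(np.float32)  # one second of audio
features = extractor(waveform, sampling_rate=16_000, return_tensors="pt").input_values
print(features.shape)  # torch.Size([1, 1024, 128])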
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
299
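The file above defers heavy imports through a structure dict handed to _LazyModule. The same on-demand idea can be expressed with a PEP 562 module-level __getattr__; a sketch with the hypothetical name lazy_getattr_factory, not the transformers implementation:

import importlib
from types import ModuleType


def lazy_getattr_factory(module_name: str, import_structure: dict):
    # Invert {submodule: [attr, ...]} into {attr: submodule} for O(1) lookup.
    attr_to_submodule = {
        attr: sub for sub, attrs in import_structure.items() for attr in attrs
    }

    def __getattr__(name: str):
        submodule = attr_to_submodule.get(name)
        if submodule is None:
            raise AttributeError(f"module {module_name!r} has no attribute {name!r}")
        # Import the submodule relative to the package only on first access.
        module: ModuleType = importlib.import_module(f".{submodule}", module_name)
        return getattr(module, name)

    return __getattr__

In a package __init__.py this would be wired up as __getattr__ = lazy_getattr_factory(__name__, _import_structure).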
'''simple docstring''' import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = 0 @slow def lowercase_ ( self : Dict ): '''simple docstring''' for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(_A ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(_A ) , 0 ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 20 ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Tuple = AutoConfig.from_pretrained(_A ) self.assertIsInstance(_A , _A ) # Check that tokenizer_type ≠ model_type UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(_A , config=_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def lowercase_ ( self : str ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_A , '''vocab.txt''' ) ) UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(_A , tokenizer_type='''bert''' , use_fast=_A ) self.assertIsInstance(_A , _A ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_A , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_A , '''merges.txt''' ) ) UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A , tokenizer_type='''gpt2''' , use_fast=_A ) self.assertIsInstance(_A , _A ) @require_tokenizers def lowercase_ ( self : str ): '''simple docstring''' 
with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_A , '''vocab.txt''' ) ) UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(_A , tokenizer_type='''bert''' ) self.assertIsInstance(_A , _A ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_A , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_A , '''merges.txt''' ) ) UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(_A , tokenizer_type='''gpt2''' ) self.assertIsInstance(_A , _A ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' with pytest.raises(_A ): AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' ) @require_tokenizers def lowercase_ ( self : int ): '''simple docstring''' for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: UpperCAmelCase__ : Optional[int] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) if isinstance(_A , _A ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _A ) else: self.assertEqual(tokenizer.do_lower_case , _A ) self.assertEqual(tokenizer.model_max_length , 512 ) @require_tokenizers def lowercase_ ( self : List[str] ): '''simple docstring''' for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( _A , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ): UpperCAmelCase__ : Dict = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TOKENIZER_MAPPING.values() UpperCAmelCase__ : Any = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(_A ) @require_tokenizers def lowercase_ ( self : Optional[int] ): '''simple docstring''' self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=_A ) , _A ) self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , _A ) @require_tokenizers def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : int = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=_A ) UpperCAmelCase__ : Any = '''Hello, world. 
How are you?''' UpperCAmelCase__ : Dict = tokenizer.tokenize(_A ) self.assertEqual('''[UNK]''' , tokens[0] ) UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=_A ) UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize(_A ) self.assertEqual('''[UNK]''' , tokens[0] ) @require_tokenizers def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : List[str] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' ) self.assertEqual(type(_A ) , _A ) self.assertEqual(tokenizer.model_max_length , 512 ) self.assertEqual(tokenizer.vocab_size , 30_000 ) self.assertEqual(tokenizer.unk_token , '''[UNK]''' ) self.assertEqual(tokenizer.padding_side , '''right''' ) self.assertEqual(tokenizer.truncation_side , '''right''' ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 12 ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''ctrl''' ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(_A , _A ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : str = get_tokenizer_config('''bert-base-cased''' ) UpperCAmelCase__ : Optional[int] = config.pop('''_commit_hash''' , _A ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(_A , {'''do_lower_case''': False} ) # This model does not have a tokenizer_config so we get back an empty dict. UpperCAmelCase__ : Tuple = get_tokenizer_config(_A ) self.assertDictEqual(_A , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = get_tokenizer_config(_A ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' ) def lowercase_ ( self : Dict ): '''simple docstring''' try: AutoConfig.register('''custom''' , _A ) AutoTokenizer.register(_A , slow_tokenizer_class=_A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A ): AutoTokenizer.register(_A , slow_tokenizer_class=_A ) UpperCAmelCase__ : Optional[int] = CustomTokenizer.from_pretrained(_A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , _A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def lowercase_ ( self : Any ): '''simple docstring''' try: AutoConfig.register('''custom''' , _A ) # Can register in two steps AutoTokenizer.register(_A , slow_tokenizer_class=_A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(_A , fast_tokenizer_class=_A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( _A , slow_tokenizer_class=_A , fast_tokenizer_class=_A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A ): AutoTokenizer.register(_A , fast_tokenizer_class=_A ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase__ : Any = BertTokenizerFast.from_pretrained(_A ) bert_tokenizer.save_pretrained(_A ) UpperCAmelCase__ : Optional[int] = CustomTokenizerFast.from_pretrained(_A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(_A , use_fast=_A ) self.assertIsInstance(_A , _A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowercase_ ( self : Optional[int] ): '''simple docstring''' with self.assertRaises(_A ): UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(_A ): UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A , use_fast=_A ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) @require_tokenizers def lowercase_ ( self : int ): '''simple docstring''' class lowerCamelCase_ ( __a ): lowerCAmelCase__ = False class lowerCamelCase_ ( __a ): lowerCAmelCase__ = NewTokenizer lowerCAmelCase__ = False try: AutoConfig.register('''custom''' , _A ) AutoTokenizer.register(_A , slow_tokenizer_class=_A ) AutoTokenizer.register(_A , fast_tokenizer_class=_A ) # If remote code is not set, the default is to use local UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) UpperCAmelCase__ : str = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertTrue(tokenizer.special_attribute_present ) UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version UpperCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A , use_fast=_A ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def lowercase_ ( self : Tuple ): '''simple docstring''' with self.assertRaisesRegex( _A , '''bert-base is not a local folder and is not a valid model identifier''' ): UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' ) def lowercase_ ( self : Dict ): '''simple docstring''' with self.assertRaisesRegex( _A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A , revision='''aaaaaa''' ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
299
1
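The registration flow exercised by the tests above has a small public surface. A hedged sketch, where CustomConfig and CustomTokenizer stand in for real user-defined classes:

from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer


class CustomConfig(PretrainedConfig):
    model_type = "custom"  # must match the name passed to AutoConfig.register


class CustomTokenizer(PreTrainedTokenizer):
    pass  # a real tokenizer would implement vocab handling


AutoConfig.register("custom", CustomConfig)
AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
# AutoTokenizer.from_pretrained() now resolves checkpoints whose config is a
# CustomConfig to CustomTokenizer.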
'''simple docstring''' import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'Wav2Vec2FeatureExtractor' lowerCAmelCase__ = 'AutoTokenizer' def __init__( self : List[Any] , _A : Union[str, Any] , _A : Optional[int] ): '''simple docstring''' super().__init__(_A , _A ) UpperCAmelCase__ : Optional[Any] = self.feature_extractor UpperCAmelCase__ : str = False @classmethod def lowercase_ ( cls : str , _A : Tuple , **_A : Optional[int] ): '''simple docstring''' try: return super().from_pretrained(_A , **_A ) except OSError: warnings.warn( f"""Loading a tokenizer inside {cls.__name__} from a config that does not""" ''' include a `tokenizer_class` attribute is deprecated and will be ''' '''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`''' ''' attribute to either your `config.json` or `tokenizer_config.json` ''' '''file to suppress this warning: ''' , _A , ) UpperCAmelCase__ : Tuple = WavaVecaFeatureExtractor.from_pretrained(_A , **_A ) UpperCAmelCase__ : int = WavaVecaCTCTokenizer.from_pretrained(_A , **_A ) return cls(feature_extractor=_A , tokenizer=_A ) def __call__( self : int , *_A : Tuple , **_A : List[str] ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*_A , **_A ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) UpperCAmelCase__ : Union[str, Any] = kwargs.pop('''raw_speech''' ) else: UpperCAmelCase__ : Any = kwargs.pop('''audio''' , _A ) UpperCAmelCase__ : Any = kwargs.pop('''sampling_rate''' , _A ) UpperCAmelCase__ : List[Any] = kwargs.pop('''text''' , _A ) if len(_A ) > 0: UpperCAmelCase__ : str = args[0] UpperCAmelCase__ : List[str] = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: UpperCAmelCase__ : Tuple = self.feature_extractor(_A , *_A , sampling_rate=_A , **_A ) if text is not None: UpperCAmelCase__ : List[str] = self.tokenizer(_A , **_A ) if text is None: return inputs elif audio is None: return encodings else: UpperCAmelCase__ : Any = encodings['''input_ids'''] return inputs def lowercase_ ( self : Any , *_A : List[str] , **_A : Optional[Any] ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor.pad(*_A , **_A ) UpperCAmelCase__ : Dict = kwargs.pop('''input_features''' , _A ) UpperCAmelCase__ : int = kwargs.pop('''labels''' , _A ) if len(_A ) > 0: UpperCAmelCase__ : Any = args[0] UpperCAmelCase__ : int = args[1:] if input_features is not None: UpperCAmelCase__ : Optional[Any] = self.feature_extractor.pad(_A , *_A , **_A ) if labels is not None: UpperCAmelCase__ : str = self.tokenizer.pad(_A , **_A ) if labels is None: return input_features elif input_features is None: return labels else: UpperCAmelCase__ : Union[str, Any] = labels['''input_ids'''] return input_features def lowercase_ ( self : Dict , *_A : Any , **_A : Union[str, Any] ): '''simple docstring''' return self.tokenizer.batch_decode(*_A , **_A ) def lowercase_ ( self : List[str] , *_A : Union[str, Any] , **_A : Optional[Any] ): '''simple docstring''' return self.tokenizer.decode(*_A , **_A ) @contextmanager def lowercase_ ( self : str ): '''simple docstring''' warnings.warn( '''`as_target_processor` is deprecated 
and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) UpperCAmelCase__ : Tuple = True UpperCAmelCase__ : Optional[int] = self.tokenizer yield UpperCAmelCase__ : Tuple = self.feature_extractor UpperCAmelCase__ : Union[str, Any] = False
299
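A usage sketch for the processor above: one call routes the audio through the feature extractor and the transcript through the tokenizer. The checkpoint name and the random waveform are illustrative assumptions:

import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
speech = np.random.randn(16_000).astype(np.float32)  # one second of fake 16 kHz audio
inputs = processor(audio=speech, sampling_rate=16_000, text="HELLO WORLD", return_tensors="pt")
# inputs.input_values feeds the model; inputs.labels holds the tokenized transcript.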
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    # spatial curvature follows from the other density parameters
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    # E(z)^2 from the Friedmann equation
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    hubble = hubble_constant * e_2 ** (1 / 2)
    return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
299
1
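The function above evaluates the Friedmann relation H(z) = H0 * sqrt(Omega_r (1+z)^4 + Omega_m (1+z)^3 + Omega_k (1+z)^2 + Omega_Lambda). Because the curvature term is defined as one minus the other densities, E(0) = 1 exactly, so at redshift zero the function must return the Hubble constant itself:

h0 = hubble_parameter(
    hubble_constant=68.3,
    radiation_density=1e-4,
    matter_density=0.3,
    dark_energy=1 - 0.3,
    redshift=0,
)
assert abs(h0 - 68.3) < 1e-9  # E(0) == 1 by construction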
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge) -> None:
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        # take the cheapest vertex still outside the tree
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                # keys changed, so restore the heap invariant
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
299
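A small usage sketch for the Prim implementations above (names follow the cleaned-up version): build a weighted triangle and take its minimum spanning tree, which drops the expensive 1-3 edge:

graph = [Vertex(i) for i in range(3)]
connect(graph, 1, 2, 1)   # edge 1-2 costs 1
connect(graph, 2, 3, 2)   # edge 2-3 costs 2
connect(graph, 1, 3, 10)  # edge 1-3 costs 10 and is excluded
print(prim(graph, graph[0]))             # [(2, 1), (3, 2)]
print(list(prim_heap(graph, graph[0])))  # the heap variant yields the same tree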
'''simple docstring''' import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin UpperCamelCase__ = logging.get_logger(__name__) enable_full_determinism() class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = UNetaDModel lowerCAmelCase__ = 'sample' @property def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 4 UpperCAmelCase__ : str = 3 UpperCAmelCase__ : str = (32, 32) UpperCAmelCase__ : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : Tuple = torch.tensor([10] ).to(_A ) return {"sample": noise, "timestep": time_step} @property def lowercase_ ( self : int ): '''simple docstring''' return (3, 32, 32) @property def lowercase_ ( self : Dict ): '''simple docstring''' return (3, 32, 32) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = { '''block_out_channels''': (32, 64), '''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''), '''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''), '''attention_head_dim''': 3, '''out_channels''': 3, '''in_channels''': 3, '''layers_per_block''': 2, '''sample_size''': 32, } UpperCAmelCase__ : Tuple = self.dummy_input return init_dict, inputs_dict class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = UNetaDModel lowerCAmelCase__ = 'sample' @property def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = 4 UpperCAmelCase__ : Dict = 4 UpperCAmelCase__ : List[str] = (32, 32) UpperCAmelCase__ : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : List[Any] = torch.tensor([10] ).to(_A ) return {"sample": noise, "timestep": time_step} @property def lowercase_ ( self : Tuple ): '''simple docstring''' return (4, 32, 32) @property def lowercase_ ( self : List[str] ): '''simple docstring''' return (4, 32, 32) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[str] = { '''sample_size''': 32, '''in_channels''': 4, '''out_channels''': 4, '''layers_per_block''': 2, '''block_out_channels''': (32, 64), '''attention_head_dim''': 32, '''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''), '''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''), } UpperCAmelCase__ : Optional[Any] = self.dummy_input return init_dict, inputs_dict def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(_A ) UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Any = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A ) model.to(_A ) UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' ) def lowercase_ ( 
self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A ) model_accelerate.to(_A ) model_accelerate.eval() UpperCAmelCase__ : Tuple = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) UpperCAmelCase__ : Union[str, Any] = noise.to(_A ) UpperCAmelCase__ : Optional[Any] = torch.tensor([10] * noise.shape[0] ).to(_A ) UpperCAmelCase__ : Any = model_accelerate(_A , _A )['''sample'''] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() UpperCAmelCase__ , UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained( '''fusing/unet-ldm-dummy-update''' , output_loading_info=_A , low_cpu_mem_usage=_A ) model_normal_load.to(_A ) model_normal_load.eval() UpperCAmelCase__ : Optional[int] = model_normal_load(_A , _A )['''sample'''] assert torch_all_close(_A , _A , rtol=1e-3 ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' ) model.eval() model.to(_A ) UpperCAmelCase__ : Union[str, Any] = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) UpperCAmelCase__ : str = noise.to(_A ) UpperCAmelCase__ : str = torch.tensor([10] * noise.shape[0] ).to(_A ) with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample UpperCAmelCase__ : List[Any] = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off UpperCAmelCase__ : Tuple = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] ) # fmt: on self.assertTrue(torch_all_close(_A , _A , rtol=1e-3 ) ) class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = UNetaDModel lowerCAmelCase__ = 'sample' @property def lowercase_ ( self : Any , _A : str=(32, 32) ): '''simple docstring''' UpperCAmelCase__ : Tuple = 4 UpperCAmelCase__ : List[str] = 3 UpperCAmelCase__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : Dict = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_A ) return {"sample": noise, "timestep": time_step} @property def lowercase_ ( self : List[str] ): '''simple docstring''' return (3, 32, 32) @property def lowercase_ ( self : List[Any] ): '''simple docstring''' return (3, 32, 32) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[str] = { '''block_out_channels''': [32, 64, 64, 64], '''in_channels''': 3, '''layers_per_block''': 1, '''out_channels''': 3, '''time_embedding_type''': '''fourier''', '''norm_eps''': 1e-6, '''mid_block_scale_factor''': math.sqrt(2.0 ), '''norm_num_groups''': None, '''down_block_types''': [ '''SkipDownBlock2D''', '''AttnSkipDownBlock2D''', '''SkipDownBlock2D''', '''SkipDownBlock2D''', ], '''up_block_types''': [ '''SkipUpBlock2D''', '''SkipUpBlock2D''', '''AttnSkipUpBlock2D''', '''SkipUpBlock2D''', ], } UpperCAmelCase__ : Tuple = self.dummy_input return init_dict, inputs_dict @slow def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : str = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_A ) self.assertIsNotNone(_A ) 
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(_A ) UpperCAmelCase__ : List[str] = self.dummy_input UpperCAmelCase__ : Dict = floats_tensor((4, 3) + (256, 256) ).to(_A ) UpperCAmelCase__ : Optional[Any] = noise UpperCAmelCase__ : Any = model(**_A ) assert image is not None, "Make sure output is not None" @slow def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' ) model.to(_A ) UpperCAmelCase__ : Optional[Any] = 4 UpperCAmelCase__ : List[str] = 3 UpperCAmelCase__ : Dict = (256, 256) UpperCAmelCase__ : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : Union[str, Any] = torch.tensor(batch_size * [1e-4] ).to(_A ) with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample UpperCAmelCase__ : Any = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off UpperCAmelCase__ : Tuple = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] ) # fmt: on self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' ) model.to(_A ) UpperCAmelCase__ : str = 4 UpperCAmelCase__ : Any = 3 UpperCAmelCase__ : int = (32, 32) UpperCAmelCase__ : Optional[Any] = torch.ones((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : Optional[Any] = torch.tensor(batch_size * [1e-4] ).to(_A ) with torch.no_grad(): UpperCAmelCase__ : int = model(_A , _A ).sample UpperCAmelCase__ : Dict = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off UpperCAmelCase__ : Any = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] ) # fmt: on self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) ) def lowercase_ ( self : Tuple ): '''simple docstring''' pass
299
1
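A minimal diffusers sketch mirroring the dummy config used in the first test class above: instantiate a small UNet2DModel and run one denoising forward pass.

import torch
from diffusers import UNet2DModel

model = UNet2DModel(
    sample_size=32,
    in_channels=3,
    out_channels=3,
    layers_per_block=2,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
noise = torch.randn(1, 3, 32, 32)
timestep = torch.tensor([10])
prediction = model(noise, timestep).sample  # same shape as the input, (1, 3, 32, 32)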
'''simple docstring''' from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCamelCase_ ( __a ): lowerCAmelCase__ = ['image_processor', 'tokenizer'] lowerCAmelCase__ = 'BlipImageProcessor' lowerCAmelCase__ = ('BertTokenizer', 'BertTokenizerFast') def __init__( self : Tuple , _A : Optional[Any] , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = False super().__init__(_A , _A ) UpperCAmelCase__ : Tuple = self.image_processor def __call__( self : Union[str, Any] , _A : ImageInput = None , _A : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _A : bool = True , _A : Union[bool, str, PaddingStrategy] = False , _A : Union[bool, str, TruncationStrategy] = None , _A : Optional[int] = None , _A : int = 0 , _A : Optional[int] = None , _A : Optional[bool] = None , _A : bool = False , _A : bool = False , _A : bool = False , _A : bool = False , _A : bool = False , _A : bool = True , _A : Optional[Union[str, TensorType]] = None , **_A : List[str] , ): '''simple docstring''' if images is None and text is None: raise ValueError('''You have to specify either images or text.''' ) # Get only text if images is None: UpperCAmelCase__ : List[str] = self.tokenizer UpperCAmelCase__ : Any = self.tokenizer( text=_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , stride=_A , pad_to_multiple_of=_A , return_attention_mask=_A , return_overflowing_tokens=_A , return_special_tokens_mask=_A , return_offsets_mapping=_A , return_token_type_ids=_A , return_length=_A , verbose=_A , return_tensors=_A , **_A , ) return text_encoding # add pixel_values UpperCAmelCase__ : Optional[Any] = self.image_processor(_A , return_tensors=_A ) if text is not None: UpperCAmelCase__ : Union[str, Any] = self.tokenizer( text=_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , stride=_A , pad_to_multiple_of=_A , return_attention_mask=_A , return_overflowing_tokens=_A , return_special_tokens_mask=_A , return_offsets_mapping=_A , return_token_type_ids=_A , return_length=_A , verbose=_A , return_tensors=_A , **_A , ) else: UpperCAmelCase__ : int = None if text_encoding is not None: encoding_image_processor.update(_A ) return encoding_image_processor def lowercase_ ( self : Optional[int] , *_A : Optional[Any] , **_A : Union[str, Any] ): '''simple docstring''' return self.tokenizer.batch_decode(*_A , **_A ) def lowercase_ ( self : Tuple , *_A : Union[str, Any] , **_A : str ): '''simple docstring''' return self.tokenizer.decode(*_A , **_A ) @property def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Any = self.tokenizer.model_input_names UpperCAmelCase__ : Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
299
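A hedged usage sketch for the Blip processor above; the checkpoint name and the blank test image are illustrative assumptions:

from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.new("RGB", (224, 224))
inputs = processor(images=image, text="a photo of", return_tensors="pt")
# inputs carries pixel_values plus the tokenized prompt (input_ids, attention_mask).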
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # consider items in decreasing value-per-weight order
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            # the whole item fits
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # take the largest fraction that still fits, then stop
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
299
1
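A worked example for the greedy routine above: items are taken in decreasing value/weight ratio, and only the last item taken is split:

value, weight, capacity = [60, 100, 120], [10, 20, 30], 50
max_value, fractions = fractional_knapsack(value, weight, capacity)
print(max_value)  # 240.0: items 1 and 2 in full, two thirds of item 3
print(fractions)  # [1, 1, 0.666...]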
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class lowerCamelCase_ ( unittest.TestCase ): def __init__( self : List[str] , _A : List[Any] , _A : Union[str, Any]=7 , _A : List[str]=3 , _A : str=30 , _A : Tuple=400 , _A : Optional[int]=True , _A : List[str]=None , _A : int=True , _A : int=[0.5, 0.5, 0.5] , _A : Optional[int]=[0.5, 0.5, 0.5] , _A : List[Any]=True , _A : str=1 / 255 , _A : Tuple=True , ): '''simple docstring''' UpperCAmelCase__ : str = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333} UpperCAmelCase__ : Optional[Any] = parent UpperCAmelCase__ : Optional[Any] = batch_size UpperCAmelCase__ : List[str] = num_channels UpperCAmelCase__ : List[Any] = min_resolution UpperCAmelCase__ : List[str] = max_resolution UpperCAmelCase__ : Tuple = do_resize UpperCAmelCase__ : Union[str, Any] = size UpperCAmelCase__ : Dict = do_normalize UpperCAmelCase__ : Union[str, Any] = image_mean UpperCAmelCase__ : Optional[int] = image_std UpperCAmelCase__ : Dict = do_rescale UpperCAmelCase__ : Union[str, Any] = rescale_factor UpperCAmelCase__ : int = do_pad def lowercase_ ( self : Any ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def lowercase_ ( self : Any , _A : Union[str, Any] , _A : Union[str, Any]=False ): '''simple docstring''' if not batched: UpperCAmelCase__ : Optional[int] = image_inputs[0] if isinstance(_A , Image.Image ): UpperCAmelCase__ , UpperCAmelCase__ : str = image.size else: UpperCAmelCase__ , UpperCAmelCase__ : int = image.shape[1], image.shape[2] if w < h: UpperCAmelCase__ : Optional[Any] = int(self.size['''shortest_edge'''] * h / w ) UpperCAmelCase__ : List[Any] = self.size['''shortest_edge'''] elif w > h: UpperCAmelCase__ : int = self.size['''shortest_edge'''] UpperCAmelCase__ : Dict = int(self.size['''shortest_edge'''] * w / h ) else: UpperCAmelCase__ : List[str] = self.size['''shortest_edge'''] UpperCAmelCase__ : Dict = self.size['''shortest_edge'''] else: UpperCAmelCase__ : int = [] for image in image_inputs: UpperCAmelCase__ , UpperCAmelCase__ : str = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCAmelCase__ : Union[str, Any] = max(_A , key=lambda _A : item[0] )[0] UpperCAmelCase__ : Union[str, Any] = max(_A , key=lambda _A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = DetaImageProcessor if is_vision_available() else None def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = DetaImageProcessingTester(self ) @property def lowercase_ ( self : int ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''image_mean''' ) ) 
self.assertTrue(hasattr(_A , '''image_std''' ) ) self.assertTrue(hasattr(_A , '''do_normalize''' ) ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''do_rescale''' ) ) self.assertTrue(hasattr(_A , '''do_pad''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} ) self.assertEqual(image_processor.do_pad , _A ) def lowercase_ ( self : Dict ): '''simple docstring''' pass def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input UpperCAmelCase__ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ , UpperCAmelCase__ : str = self.image_processor_tester.get_expected_values(_A , batched=_A ) UpperCAmelCase__ : Union[str, Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input UpperCAmelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ : List[str] = image_processing(_A , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input UpperCAmelCase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, 
expected_width) , ) # Test batched UpperCAmelCase__ : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Any = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: UpperCAmelCase__ : str = json.loads(f.read() ) UpperCAmelCase__ : Tuple = {'''image_id''': 39_769, '''annotations''': target} # encode them UpperCAmelCase__ : Optional[int] = DetaImageProcessor() UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , return_tensors='''pt''' ) # verify pixel values UpperCAmelCase__ : Optional[int] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['''pixel_values'''].shape , _A ) UpperCAmelCase__ : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) ) # verify area UpperCAmelCase__ : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) ) # verify boxes UpperCAmelCase__ : int = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A ) UpperCAmelCase__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) ) # verify image_id UpperCAmelCase__ : str = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) ) # verify is_crowd UpperCAmelCase__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) ) # verify class_labels UpperCAmelCase__ : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) ) # verify orig_size UpperCAmelCase__ : int = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) ) # verify size UpperCAmelCase__ : int = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) ) @slow def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: UpperCAmelCase__ : int = json.loads(f.read() ) UpperCAmelCase__ : str = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target} UpperCAmelCase__ : Dict = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them UpperCAmelCase__ : Any = DetaImageProcessor(format='''coco_panoptic''' ) UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='''pt''' ) # verify pixel values UpperCAmelCase__ : str = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['''pixel_values'''].shape , _A ) UpperCAmelCase__ : Union[str, Any] = 
torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) ) # verify area UpperCAmelCase__ : Any = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) ) # verify boxes UpperCAmelCase__ : Dict = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A ) UpperCAmelCase__ : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) ) # verify image_id UpperCAmelCase__ : Optional[int] = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) ) # verify is_crowd UpperCAmelCase__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) ) # verify class_labels UpperCAmelCase__ : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) ) # verify masks UpperCAmelCase__ : Dict = 822_873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _A ) # verify orig_size UpperCAmelCase__ : str = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) ) # verify size UpperCAmelCase__ : Optional[Any] = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
299
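The slow tests above feed COCO-format annotations from fixture files. A hedged sketch of the same call with a made-up single-box target (bbox is [x, y, width, height] in pixels):

from PIL import Image
from transformers import DetaImageProcessor

image = Image.new("RGB", (640, 480))
target = {
    "image_id": 1,
    "annotations": [{"bbox": [100, 120, 200, 150], "category_id": 17, "area": 30_000, "iscrowd": 0}],
}
processor = DetaImageProcessor()
encoding = processor(images=image, annotations=target, return_tensors="pt")
# encoding["labels"][0] holds the normalized boxes and class labels.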
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : int , *_A : Tuple , **_A : Union[str, Any] ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : Union[str, Any] , *_A : List[Any] , **_A : Any ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : int , *_A : Union[str, Any] , **_A : Optional[Any] ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : Any , *_A : List[str] , **_A : Tuple ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : Tuple , *_A : Tuple , **_A : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : List[Any] , *_A : List[str] , **_A : Dict ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : Dict , *_A : Any , **_A : int ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : List[Any] , *_A : List[Any] , **_A : Optional[int] ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : int , *_A : Dict , **_A : Optional[Any] ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *_A : Optional[int] , **_A : Optional[Any] ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : Dict , *_A : Any , **_A : Tuple ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : int , *_A : Union[str, Any] , **_A : Dict ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *_A : Optional[int] , **_A : Dict ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : Dict , *_A : str , **_A : Dict ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : Union[str, Any] , *_A : Optional[int] , **_A : int ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : Optional[Any] , *_A : Union[str, Any] , **_A : Dict ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : List[str] , *_A : str , **_A : List[str] ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', 
'''onnx'''] ) @classmethod def lowercase_ ( cls : Dict , *_A : str , **_A : Any ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
299
1
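Every placeholder above follows one pattern: fail loudly, at construction or class-method time, when a required backend is missing. A generic, self-contained illustration of that pattern (not transformers' exact internals):

AVAILABLE_BACKENDS = {"torch"}  # pretend only torch is installed


def requires_backends(obj, backends):
    missing = [b for b in backends if b not in AVAILABLE_BACKENDS]
    if missing:
        name = obj if isinstance(obj, str) else obj.__class__.__name__
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")


class OnnxPipelinePlaceholder:
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)  # raises before any real work happens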
import warnings

from diffusers import StableDiffusionImg2ImgPipeline  # noqa: F401


warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
299
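The shim above exists only for backwards compatibility; new code should import the pipeline directly:

from diffusers import StableDiffusionImg2ImgPipeline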
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
299
1
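What the _LazyModule indirection above buys: importing the package stays cheap, and heavy submodules load only when one of their names is first accessed. A stripped-down sketch of the idea (not transformers' exact implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ runs once per name
        return value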
def selection_sort(collection):
    length = len(collection)
    for i in range(length - 1):
        # find the index of the smallest remaining element
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            # swap it into position i
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
299
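A quick check of the sort above, including the empty edge case:

print(selection_sort([5, 2, 9, 1]))  # [1, 2, 5, 9]
print(selection_sort([]))            # []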
'''simple docstring''' import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class lowerCamelCase_ ( __a ): def __get__( self : str , _A : Tuple , _A : List[str]=None ): '''simple docstring''' if obj is None: return self if self.fget is None: raise AttributeError('''unreadable attribute''' ) UpperCAmelCase__ : Union[str, Any] = '''__cached_''' + self.fget.__name__ UpperCAmelCase__ : Any = getattr(_A , _A , _A ) if cached is None: UpperCAmelCase__ : Dict = self.fget(_A ) setattr(_A , _A , _A ) return cached def a__ ( lowerCAmelCase__ ) -> Optional[int]: UpperCAmelCase__ : Tuple = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(F"""invalid truth value {val!r}""" ) def a__ ( lowerCAmelCase__ ) -> Optional[Any]: if is_torch_fx_proxy(lowerCAmelCase__ ): return True if is_torch_available(): import torch if isinstance(lowerCAmelCase__ , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(lowerCAmelCase__ , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(lowerCAmelCase__ , (jnp.ndarray, Tracer) ): return True return isinstance(lowerCAmelCase__ , np.ndarray ) def a__ ( lowerCAmelCase__ ) -> Any: return isinstance(lowerCAmelCase__ , np.ndarray ) def a__ ( lowerCAmelCase__ ) -> int: return _is_numpy(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> Optional[Any]: import torch return isinstance(lowerCAmelCase__ , torch.Tensor ) def a__ ( lowerCAmelCase__ ) -> List[str]: return False if not is_torch_available() else _is_torch(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> Optional[Any]: import torch return isinstance(lowerCAmelCase__ , torch.device ) def a__ ( lowerCAmelCase__ ) -> List[str]: return False if not is_torch_available() else _is_torch_device(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> Any: import torch if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): if hasattr(lowerCAmelCase__ , lowerCAmelCase__ ): UpperCAmelCase__ : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) else: return False return isinstance(lowerCAmelCase__ , torch.dtype ) def a__ ( lowerCAmelCase__ ) -> Optional[int]: return False if not is_torch_available() else _is_torch_dtype(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> List[Any]: import tensorflow as tf return isinstance(lowerCAmelCase__ , tf.Tensor ) def a__ ( lowerCAmelCase__ ) -> List[str]: return False if not is_tf_available() else _is_tensorflow(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> Any: import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(lowerCAmelCase__ , '''is_symbolic_tensor''' ): return tf.is_symbolic_tensor(lowerCAmelCase__ ) return type(lowerCAmelCase__ ) == tf.Tensor def a__ ( lowerCAmelCase__ ) -> Union[str, Any]: return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> Tuple: import jax.numpy as jnp # noqa: F811 return isinstance(lowerCAmelCase__ , jnp.ndarray ) def a__ ( lowerCAmelCase__ ) -> List[Any]: return False if not 
is_flax_available() else _is_jax(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> Tuple: if isinstance(lowerCAmelCase__ , (dict, UserDict) ): return {k: to_py_obj(lowerCAmelCase__ ) for k, v in obj.items()} elif isinstance(lowerCAmelCase__ , (list, tuple) ): return [to_py_obj(lowerCAmelCase__ ) for o in obj] elif is_tf_tensor(lowerCAmelCase__ ): return obj.numpy().tolist() elif is_torch_tensor(lowerCAmelCase__ ): return obj.detach().cpu().tolist() elif is_jax_tensor(lowerCAmelCase__ ): return np.asarray(lowerCAmelCase__ ).tolist() elif isinstance(lowerCAmelCase__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def a__ ( lowerCAmelCase__ ) -> Tuple: if isinstance(lowerCAmelCase__ , (dict, UserDict) ): return {k: to_numpy(lowerCAmelCase__ ) for k, v in obj.items()} elif isinstance(lowerCAmelCase__ , (list, tuple) ): return np.array(lowerCAmelCase__ ) elif is_tf_tensor(lowerCAmelCase__ ): return obj.numpy() elif is_torch_tensor(lowerCAmelCase__ ): return obj.detach().cpu().numpy() elif is_jax_tensor(lowerCAmelCase__ ): return np.asarray(lowerCAmelCase__ ) else: return obj class lowerCamelCase_ ( __a ): def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[str] = fields(self ) # Safety and consistency checks if not len(_A ): raise ValueError(f"""{self.__class__.__name__} has no fields.""" ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" ) UpperCAmelCase__ : Dict = getattr(self , class_fields[0].name ) UpperCAmelCase__ : Any = all(getattr(self , field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(_A ): if isinstance(_A , _A ): UpperCAmelCase__ : List[Any] = first_field.items() UpperCAmelCase__ : Optional[int] = True else: try: UpperCAmelCase__ : Optional[int] = iter(_A ) UpperCAmelCase__ : Optional[int] = True except TypeError: UpperCAmelCase__ : Optional[Any] = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(_A ): if ( not isinstance(_A , (list, tuple) ) or not len(_A ) == 2 or not isinstance(element[0] , _A ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute UpperCAmelCase__ : List[Any] = first_field else: # If we have a mixed iterator, raise an error raise ValueError( f"""Cannot set key/value for {element}. 
It needs to be a tuple (key, value).""" ) break setattr(self , element[0] , element[1] ) if element[1] is not None: UpperCAmelCase__ : List[str] = element[1] elif first_field is not None: UpperCAmelCase__ : Optional[Any] = first_field else: for field in class_fields: UpperCAmelCase__ : Optional[int] = getattr(self , field.name ) if v is not None: UpperCAmelCase__ : str = v def __delitem__( self : Union[str, Any] , *_A : Any , **_A : str ): '''simple docstring''' raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" ) def lowercase_ ( self : Any , *_A : List[str] , **_A : Tuple ): '''simple docstring''' raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" ) def lowercase_ ( self : Optional[Any] , *_A : Any , **_A : Tuple ): '''simple docstring''' raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" ) def lowercase_ ( self : Optional[Any] , *_A : Dict , **_A : List[Any] ): '''simple docstring''' raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" ) def __getitem__( self : List[str] , _A : Any ): '''simple docstring''' if isinstance(_A , _A ): UpperCAmelCase__ : Union[str, Any] = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self : int , _A : Union[str, Any] , _A : str ): '''simple docstring''' if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(_A , _A ) super().__setattr__(_A , _A ) def __setitem__( self : Any , _A : Optional[int] , _A : List[str] ): '''simple docstring''' super().__setitem__(_A , _A ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(_A , _A ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' return tuple(self[k] for k in self.keys() ) class lowerCamelCase_ ( __a , __a ): @classmethod def lowercase_ ( cls : Optional[Any] , _A : Optional[Any] ): '''simple docstring''' raise ValueError( f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" ) class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'longest' lowerCAmelCase__ = 'max_length' lowerCAmelCase__ = 'do_not_pad' class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'pt' lowerCAmelCase__ = 'tf' lowerCAmelCase__ = 'np' lowerCAmelCase__ = 'jax' class lowerCamelCase_ : def __init__( self : List[Any] , _A : List[ContextManager] ): '''simple docstring''' UpperCAmelCase__ : str = context_managers UpperCAmelCase__ : int = ExitStack() def __enter__( self : str ): '''simple docstring''' for context_manager in self.context_managers: self.stack.enter_context(_A ) def __exit__( self : Dict , *_A : List[Any] , **_A : str ): '''simple docstring''' self.stack.__exit__(*_A , **_A ) def a__ ( lowerCAmelCase__ ) -> Any: UpperCAmelCase__ : int = infer_framework(lowerCAmelCase__ ) if framework == "tf": UpperCAmelCase__ : Optional[Any] = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCAmelCase__ : List[Any] = inspect.signature(model_class.forward ) # PyTorch models else: UpperCAmelCase__ : List[Any] = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def a__ ( lowerCAmelCase__ ) -> Optional[int]: UpperCAmelCase__ : Dict = model_class.__name__ UpperCAmelCase__ : Union[str, Any] = infer_framework(lowerCAmelCase__ ) if framework == 
"tf": UpperCAmelCase__ : Tuple = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCAmelCase__ : List[str] = inspect.signature(model_class.forward ) # PyTorch models else: UpperCAmelCase__ : int = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = "" , lowerCAmelCase__ = "." ) -> Any: def _flatten_dict(lowerCAmelCase__ , lowerCAmelCase__="" , lowerCAmelCase__="." ): for k, v in d.items(): UpperCAmelCase__ : int = str(lowerCAmelCase__ ) + delimiter + str(lowerCAmelCase__ ) if parent_key else k if v and isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): yield from flatten_dict(lowerCAmelCase__ , lowerCAmelCase__ , delimiter=lowerCAmelCase__ ).items() else: yield key, v return dict(_flatten_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) ) @contextmanager def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = False ) -> int: if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ) -> Optional[Any]: if is_numpy_array(lowerCAmelCase__ ): return np.transpose(lowerCAmelCase__ , axes=lowerCAmelCase__ ) elif is_torch_tensor(lowerCAmelCase__ ): return array.T if axes is None else array.permute(*lowerCAmelCase__ ) elif is_tf_tensor(lowerCAmelCase__ ): import tensorflow as tf return tf.transpose(lowerCAmelCase__ , perm=lowerCAmelCase__ ) elif is_jax_tensor(lowerCAmelCase__ ): return jnp.transpose(lowerCAmelCase__ , axes=lowerCAmelCase__ ) else: raise ValueError(F"""Type not supported for transpose: {type(lowerCAmelCase__ )}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple: if is_numpy_array(lowerCAmelCase__ ): return np.reshape(lowerCAmelCase__ , lowerCAmelCase__ ) elif is_torch_tensor(lowerCAmelCase__ ): return array.reshape(*lowerCAmelCase__ ) elif is_tf_tensor(lowerCAmelCase__ ): import tensorflow as tf return tf.reshape(lowerCAmelCase__ , lowerCAmelCase__ ) elif is_jax_tensor(lowerCAmelCase__ ): return jnp.reshape(lowerCAmelCase__ , lowerCAmelCase__ ) else: raise ValueError(F"""Type not supported for reshape: {type(lowerCAmelCase__ )}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ) -> List[Any]: if is_numpy_array(lowerCAmelCase__ ): return np.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ ) elif is_torch_tensor(lowerCAmelCase__ ): return array.squeeze() if axis is None else array.squeeze(dim=lowerCAmelCase__ ) elif is_tf_tensor(lowerCAmelCase__ ): import tensorflow as tf return tf.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ ) elif is_jax_tensor(lowerCAmelCase__ ): return jnp.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ ) else: raise ValueError(F"""Type not supported for squeeze: {type(lowerCAmelCase__ )}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]: if is_numpy_array(lowerCAmelCase__ ): return np.expand_dims(lowerCAmelCase__ , lowerCAmelCase__ ) elif is_torch_tensor(lowerCAmelCase__ ): return array.unsqueeze(dim=lowerCAmelCase__ ) elif is_tf_tensor(lowerCAmelCase__ ): import tensorflow as tf return tf.expand_dims(lowerCAmelCase__ , axis=lowerCAmelCase__ ) elif is_jax_tensor(lowerCAmelCase__ ): return jnp.expand_dims(lowerCAmelCase__ , axis=lowerCAmelCase__ ) else: raise ValueError(F"""Type not supported for expand_dims: {type(lowerCAmelCase__ )}.""" ) 
def a__ ( lowerCAmelCase__ ) -> int: if is_numpy_array(lowerCAmelCase__ ): return np.size(lowerCAmelCase__ ) elif is_torch_tensor(lowerCAmelCase__ ): return array.numel() elif is_tf_tensor(lowerCAmelCase__ ): import tensorflow as tf return tf.size(lowerCAmelCase__ ) elif is_jax_tensor(lowerCAmelCase__ ): return array.size else: raise ValueError(F"""Type not supported for expand_dims: {type(lowerCAmelCase__ )}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]: for key, value in auto_map.items(): if isinstance(lowerCAmelCase__ , (tuple, list) ): UpperCAmelCase__ : int = [F"""{repo_id}--{v}""" if (v is not None and '''--''' not in v) else v for v in value] elif value is not None and "--" not in value: UpperCAmelCase__ : str = F"""{repo_id}--{value}""" return auto_map def a__ ( lowerCAmelCase__ ) -> Tuple: for base_class in inspect.getmro(lowerCAmelCase__ ): UpperCAmelCase__ : Optional[int] = base_class.__module__ UpperCAmelCase__ : Optional[int] = base_class.__name__ if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel": return "tf" elif module.startswith('''torch''' ) or name == "PreTrainedModel": return "pt" elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(F"""Could not infer framework from class {model_class}.""" )
299
1
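The transpose/reshape/squeeze/expand_dims helpers in the row above all follow one dispatch pattern: probe the tensor's type, then delegate to the matching backend op. A minimal sketch of that pattern, assuming only NumPy is installed (the torch branch is imported lazily and skipped if unavailable); transpose_any is an illustrative name, not the library function:

import numpy as np

def transpose_any(array, axes=None):
    # NumPy arrays are handled natively.
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    # Torch tensors expose .T / .permute instead; import lazily so
    # NumPy-only environments still run this sketch.
    try:
        import torch
        if isinstance(array, torch.Tensor):
            return array.T if axes is None else array.permute(*axes)
    except ImportError:
        pass
    raise ValueError(f"Type not supported for transpose: {type(array)}.")

print(transpose_any(np.arange(6).reshape(2, 3)).shape)  # (3, 2)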
'''simple docstring''' import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( '''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion''' ) UpperCamelCase__ = None UpperCamelCase__ = { '''7B''': 1_1_0_0_8, '''13B''': 1_3_8_2_4, '''30B''': 1_7_9_2_0, '''65B''': 2_2_0_1_6, '''70B''': 2_8_6_7_2, } UpperCamelCase__ = { '''7B''': 1, '''7Bf''': 1, '''13B''': 2, '''13Bf''': 2, '''30B''': 4, '''65B''': 8, '''70B''': 8, '''70Bf''': 8, } def a__ ( lowerCAmelCase__ , lowerCAmelCase__=1 , lowerCAmelCase__=2_56 ) -> str: return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def a__ ( lowerCAmelCase__ ) -> Any: with open(lowerCAmelCase__ , '''r''' ) as f: return json.load(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]: with open(lowerCAmelCase__ , '''w''' ) as f: json.dump(lowerCAmelCase__ , lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=True ) -> str: os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) UpperCAmelCase__ : Optional[int] = os.path.join(lowerCAmelCase__ , '''tmp''' ) os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) UpperCAmelCase__ : List[Any] = read_json(os.path.join(lowerCAmelCase__ , '''params.json''' ) ) UpperCAmelCase__ : int = NUM_SHARDS[model_size] UpperCAmelCase__ : Optional[Any] = params['''n_layers'''] UpperCAmelCase__ : Optional[int] = params['''n_heads'''] UpperCAmelCase__ : Optional[Any] = n_heads // num_shards UpperCAmelCase__ : Optional[int] = params['''dim'''] UpperCAmelCase__ : Optional[int] = dim // n_heads UpperCAmelCase__ : int = 1_0_0_0_0.0 UpperCAmelCase__ : Any = 1.0 / (base ** (torch.arange(0 , lowerCAmelCase__ , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: UpperCAmelCase__ : Optional[int] = params['''n_kv_heads'''] # for GQA / MQA UpperCAmelCase__ : Tuple = n_heads_per_shard // num_key_value_heads UpperCAmelCase__ : Any = dim // num_key_value_heads else: # compatibility with other checkpoints UpperCAmelCase__ : Tuple = n_heads UpperCAmelCase__ : Tuple = n_heads_per_shard UpperCAmelCase__ : Optional[int] = dim # permute for sliced rotary def permute(lowerCAmelCase__ , lowerCAmelCase__=n_heads , lowerCAmelCase__=dim , lowerCAmelCase__=dim ): return w.view(lowerCAmelCase__ , dima // n_heads // 2 , 2 , lowerCAmelCase__ ).transpose(1 , 2 ).reshape(lowerCAmelCase__ , lowerCAmelCase__ ) print(F"""Fetching all parameters from the checkpoint at {input_base_path}.""" ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
UpperCAmelCase__ : str = torch.load(os.path.join(lowerCAmelCase__ , '''consolidated.00.pth''' ) , map_location='''cpu''' ) else: # Sharded UpperCAmelCase__ : int = [ torch.load(os.path.join(lowerCAmelCase__ , F"""consolidated.{i:02d}.pth""" ) , map_location='''cpu''' ) for i in range(lowerCAmelCase__ ) ] UpperCAmelCase__ : Optional[int] = 0 UpperCAmelCase__ : Union[str, Any] = {'''weight_map''': {}} for layer_i in range(lowerCAmelCase__ ): UpperCAmelCase__ : Optional[int] = F"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin""" if model_size == "7B": # Unsharded UpperCAmelCase__ : Any = { F"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute( loaded[F"""layers.{layer_i}.attention.wq.weight"""] ), F"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute( loaded[F"""layers.{layer_i}.attention.wk.weight"""] ), F"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[F"""layers.{layer_i}.attention.wv.weight"""], F"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[F"""layers.{layer_i}.attention.wo.weight"""], F"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w1.weight"""], F"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w2.weight"""], F"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w3.weight"""], F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[F"""layers.{layer_i}.attention_norm.weight"""], F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[F"""layers.{layer_i}.ffn_norm.weight"""], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. 
UpperCAmelCase__ : int = { F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][ F"""layers.{layer_i}.attention_norm.weight""" ].clone(), F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][ F"""layers.{layer_i}.ffn_norm.weight""" ].clone(), } UpperCAmelCase__ : Dict = permute( torch.cat( [ loaded[i][F"""layers.{layer_i}.attention.wq.weight"""].view(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) for i in range(lowerCAmelCase__ ) ] , dim=0 , ).reshape(lowerCAmelCase__ , lowerCAmelCase__ ) ) UpperCAmelCase__ : List[str] = permute( torch.cat( [ loaded[i][F"""layers.{layer_i}.attention.wk.weight"""].view( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) for i in range(lowerCAmelCase__ ) ] , dim=0 , ).reshape(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) UpperCAmelCase__ : Union[str, Any] = torch.cat( [ loaded[i][F"""layers.{layer_i}.attention.wv.weight"""].view( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) for i in range(lowerCAmelCase__ ) ] , dim=0 , ).reshape(lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase__ : Any = torch.cat( [loaded[i][F"""layers.{layer_i}.attention.wo.weight"""] for i in range(lowerCAmelCase__ )] , dim=1 ) UpperCAmelCase__ : Tuple = torch.cat( [loaded[i][F"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(lowerCAmelCase__ )] , dim=0 ) UpperCAmelCase__ : Tuple = torch.cat( [loaded[i][F"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(lowerCAmelCase__ )] , dim=1 ) UpperCAmelCase__ : str = torch.cat( [loaded[i][F"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(lowerCAmelCase__ )] , dim=0 ) UpperCAmelCase__ : Dict = inv_freq for k, v in state_dict.items(): UpperCAmelCase__ : Any = filename param_count += v.numel() torch.save(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) ) UpperCAmelCase__ : Union[str, Any] = F"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin""" if model_size == "7B": # Unsharded UpperCAmelCase__ : Dict = { '''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''], '''model.norm.weight''': loaded['''norm.weight'''], '''lm_head.weight''': loaded['''output.weight'''], } else: UpperCAmelCase__ : str = { '''model.norm.weight''': loaded[0]['''norm.weight'''], '''model.embed_tokens.weight''': torch.cat( [loaded[i]['''tok_embeddings.weight'''] for i in range(lowerCAmelCase__ )] , dim=1 ), '''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(lowerCAmelCase__ )] , dim=0 ), } for k, v in state_dict.items(): UpperCAmelCase__ : List[str] = filename param_count += v.numel() torch.save(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) ) # Write configs UpperCAmelCase__ : Optional[Any] = {'''total_size''': param_count * 2} write_json(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , '''pytorch_model.bin.index.json''' ) ) UpperCAmelCase__ : Any = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1 UpperCAmelCase__ : Union[str, Any] = params['''multiple_of'''] if '''multiple_of''' in params else 2_56 UpperCAmelCase__ : List[str] = LlamaConfig( hidden_size=lowerCAmelCase__ , intermediate_size=compute_intermediate_size(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , num_attention_heads=params['''n_heads'''] , num_hidden_layers=params['''n_layers'''] , rms_norm_eps=params['''norm_eps'''] , num_key_value_heads=lowerCAmelCase__ , ) config.save_pretrained(lowerCAmelCase__ ) # Make space so we can load the 
model properly now. del state_dict del loaded gc.collect() print('''Loading the checkpoint in a Llama model.''' ) UpperCAmelCase__ : Optional[Any] = LlamaForCausalLM.from_pretrained(lowerCAmelCase__ , torch_dtype=torch.floataa , low_cpu_mem_usage=lowerCAmelCase__ ) # Avoid saving this as part of the config. del model.config._name_or_path print('''Saving in the Transformers format.''' ) model.save_pretrained(lowerCAmelCase__ , safe_serialization=lowerCAmelCase__ ) shutil.rmtree(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]: # Initialize the tokenizer based on the `spm` model UpperCAmelCase__ : Union[str, Any] = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(F"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" ) UpperCAmelCase__ : List[str] = tokenizer_class(lowerCAmelCase__ ) tokenizer.save_pretrained(lowerCAmelCase__ ) def a__ ( ) -> Optional[Any]: UpperCAmelCase__ : Any = argparse.ArgumentParser() parser.add_argument( '''--input_dir''' , help='''Location of LLaMA weights, which contains tokenizer.model and model folders''' , ) parser.add_argument( '''--model_size''' , choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''] , ) parser.add_argument( '''--output_dir''' , help='''Location to write HF model and tokenizer''' , ) parser.add_argument('''--safe_serialization''' , type=lowerCAmelCase__ , help='''Whether or not to save using `safetensors`.''' ) UpperCAmelCase__ : Union[str, Any] = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) UpperCAmelCase__ : List[str] = os.path.join(args.input_dir , '''tokenizer.model''' ) write_tokenizer(args.output_dir , lowerCAmelCase__ ) if __name__ == "__main__": main()
299
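The permute helper in the conversion script above is the subtle part: original LLaMA checkpoints store the query/key projections with rotary dimension pairs interleaved, while the Transformers implementation expects the two rotary halves to be contiguous within each head. A toy-sized demonstration of the row reordering it performs, assuming 2 heads with head dimension 4 (the tensor is synthetic, not a real weight):

import torch

n_heads, head_dim = 2, 4
dim = n_heads * head_dim  # 8

def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
    # Regroup each head's rows from (pair, half) order to (half, pair) order.
    return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

w = torch.arange(dim * dim).reshape(dim, dim)
print(w[:4, 0])           # rows 0..3 of head 0 in checkpoint order: [0, 8, 16, 24]
print(permute(w)[:4, 0])  # even-indexed rows first, then odd: [0, 16, 8, 24]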
'''simple docstring''' import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCamelCase__ = 1_6 UpperCamelCase__ = 3_2 def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 16 ) -> Dict: UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''bert-base-cased''' ) UpperCAmelCase__ : str = DatasetDict( { '''train''': dataset['''train'''].select(lowerCAmelCase__ ), '''validation''': dataset['''train'''].select(lowerCAmelCase__ ), '''test''': dataset['''validation'''], } ) def tokenize_function(lowerCAmelCase__ ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase__ : Optional[int] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCAmelCase__ : Dict = datasets.map( lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase__ : int = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(lowerCAmelCase__ ): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCAmelCase__ : Optional[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCAmelCase__ : Any = 16 elif accelerator.mixed_precision != "no": UpperCAmelCase__ : Dict = 8 else: UpperCAmelCase__ : List[Any] = None return tokenizer.pad( lowerCAmelCase__ , padding='''longest''' , max_length=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_tensors='''pt''' , ) # Instantiate dataloaders. 
UpperCAmelCase__ : List[Any] = DataLoader( tokenized_datasets['''train'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ ) UpperCAmelCase__ : List[str] = DataLoader( tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ ) UpperCAmelCase__ : List[Any] = DataLoader( tokenized_datasets['''test'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ ) return train_dataloader, eval_dataloader, test_dataloader def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str: # New Code # UpperCAmelCase__ : List[str] = [] # Download the dataset UpperCAmelCase__ : Union[str, Any] = load_dataset('''glue''' , '''mrpc''' ) # Create our splits UpperCAmelCase__ : str = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator UpperCAmelCase__ : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase__ : Any = config['''lr'''] UpperCAmelCase__ : Any = int(config['''num_epochs'''] ) UpperCAmelCase__ : Any = int(config['''seed'''] ) UpperCAmelCase__ : Dict = int(config['''batch_size'''] ) UpperCAmelCase__ : Any = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation UpperCAmelCase__ : Optional[Any] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: UpperCAmelCase__ : Any = batch_size // MAX_GPU_BATCH_SIZE UpperCAmelCase__ : List[Any] = MAX_GPU_BATCH_SIZE set_seed(lowerCAmelCase__ ) # New Code # # Create our folds: UpperCAmelCase__ : Union[str, Any] = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] ) UpperCAmelCase__ : Dict = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(lowerCAmelCase__ ): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = get_fold_dataloaders( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase__ : List[str] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowerCAmelCase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCAmelCase__ : Optional[Any] = model.to(accelerator.device ) # Instantiate optimizer UpperCAmelCase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=lowerCAmelCase__ ) # Instantiate scheduler UpperCAmelCase__ : Any = get_linear_schedule_with_warmup( optimizer=lowerCAmelCase__ , num_warmup_steps=1_00 , num_training_steps=(len(lowerCAmelCase__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = accelerator.prepare( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Now we train the model for epoch in range(lowerCAmelCase__ ): model.train() for step, batch in enumerate(lowerCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) UpperCAmelCase__ : Union[str, Any] = model(**lowerCAmelCase__ ) UpperCAmelCase__ : Dict = outputs.loss UpperCAmelCase__ : Dict = loss / gradient_accumulation_steps accelerator.backward(lowerCAmelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase__ : str = model(**lowerCAmelCase__ ) UpperCAmelCase__ : Any = outputs.logits.argmax(dim=-1 ) UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , ) UpperCAmelCase__ : str = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , lowerCAmelCase__ ) # New Code # # We also run predictions on the test set at the very end UpperCAmelCase__ : int = [] for step, batch in enumerate(lowerCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase__ : str = model(**lowerCAmelCase__ ) UpperCAmelCase__ : Union[str, Any] = outputs.logits UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(lowerCAmelCase__ , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: UpperCAmelCase__ : Union[str, Any] = torch.cat(lowerCAmelCase__ , dim=0 ) UpperCAmelCase__ : Tuple = torch.stack(lowerCAmelCase__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) UpperCAmelCase__ : Optional[Any] = metric.compute(predictions=lowerCAmelCase__ , references=lowerCAmelCase__ ) accelerator.print('''Average test metrics from all folds:''' , lowerCAmelCase__ ) def a__ ( ) -> Any: UpperCAmelCase__ : Tuple = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). 
Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) # New Code # parser.add_argument('''--num_folds''' , type=lowerCAmelCase__ , default=3 , help='''The number of splits to perform across the dataset''' ) UpperCAmelCase__ : Tuple = parser.parse_args() UpperCAmelCase__ : Any = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(lowerCAmelCase__ , lowerCAmelCase__ ) if __name__ == "__main__": main()
299
1
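The cross-validation script above trains one model per stratified fold, collects each fold's logits on the shared test split, then sums (equivalently, averages) them before the final argmax. The ensembling step in isolation, with random logits standing in for real model outputs so the sketch stays self-contained:

import numpy as np
from sklearn.model_selection import StratifiedKFold

rng = np.random.default_rng(0)
labels = np.array([0, 1] * 50)  # toy binary labels, 100 examples
kfold = StratifiedKFold(n_splits=3)

fold_logits = []
for train_idx, valid_idx in kfold.split(np.zeros(len(labels)), labels):
    # A real run would fine-tune on train_idx here and predict the shared
    # test split; random logits keep the sketch runnable on its own.
    fold_logits.append(rng.normal(size=(20, 2)))  # 20 test examples, 2 classes

ensembled = np.stack(fold_logits).mean(axis=0).argmax(axis=-1)
print(ensembled.shape)  # (20,)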
'''simple docstring''' import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = KandinskyVaaPriorPipeline lowerCAmelCase__ = ['prompt'] lowerCAmelCase__ = ['prompt', 'negative_prompt'] lowerCAmelCase__ = [ 'num_images_per_prompt', 'generator', 'num_inference_steps', 'latents', 'negative_prompt', 'guidance_scale', 'output_type', 'return_dict', ] lowerCAmelCase__ = False @property def lowercase_ ( self : Dict ): '''simple docstring''' return 32 @property def lowercase_ ( self : Optional[int] ): '''simple docstring''' return 32 @property def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim @property def lowercase_ ( self : int ): '''simple docstring''' return self.time_input_dim * 4 @property def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' return 100 @property def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModelWithProjection(_A ) @property def lowercase_ ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : str = { '''num_attention_heads''': 2, '''attention_head_dim''': 12, '''embedding_dim''': self.text_embedder_hidden_size, '''num_layers''': 1, } UpperCAmelCase__ : List[Any] = PriorTransformer(**_A ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 UpperCAmelCase__ : Any = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def lowercase_ ( self : str ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : Tuple = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) UpperCAmelCase__ : Any = CLIPVisionModelWithProjection(_A ) return model @property def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : int = CLIPImageProcessor( crop_size=224 , do_center_crop=_A , do_normalize=_A , do_resize=_A , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , ) return image_processor def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = self.dummy_prior UpperCAmelCase__ : Optional[int] = self.dummy_image_encoder UpperCAmelCase__ : Dict = self.dummy_text_encoder UpperCAmelCase__ : 
Union[str, Any] = self.dummy_tokenizer UpperCAmelCase__ : Optional[int] = self.dummy_image_processor UpperCAmelCase__ : str = UnCLIPScheduler( variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_000 , clip_sample=_A , clip_sample_range=1_0.0 , ) UpperCAmelCase__ : Tuple = { '''prior''': prior, '''image_encoder''': image_encoder, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''scheduler''': scheduler, '''image_processor''': image_processor, } return components def lowercase_ ( self : Tuple , _A : Dict , _A : Any=0 ): '''simple docstring''' if str(_A ).startswith('''mps''' ): UpperCAmelCase__ : Any = torch.manual_seed(_A ) else: UpperCAmelCase__ : str = torch.Generator(device=_A ).manual_seed(_A ) UpperCAmelCase__ : Union[str, Any] = { '''prompt''': '''horse''', '''generator''': generator, '''guidance_scale''': 4.0, '''num_inference_steps''': 2, '''output_type''': '''np''', } return inputs def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : str = '''cpu''' UpperCAmelCase__ : Union[str, Any] = self.get_dummy_components() UpperCAmelCase__ : int = self.pipeline_class(**_A ) UpperCAmelCase__ : List[Any] = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase__ : Any = pipe(**self.get_dummy_inputs(_A ) ) UpperCAmelCase__ : List[Any] = output.image_embeds UpperCAmelCase__ : List[Any] = pipe( **self.get_dummy_inputs(_A ) , return_dict=_A , )[0] UpperCAmelCase__ : Dict = image[0, -10:] UpperCAmelCase__ : List[Any] = image_from_tuple[0, -10:] assert image.shape == (1, 32) UpperCAmelCase__ : Union[str, Any] = np.array( [-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = torch_device == '''cpu''' UpperCAmelCase__ : Optional[Any] = True UpperCAmelCase__ : Union[str, Any] = False self._test_inference_batch_single_identical( test_max_difference=_A , relax_max_difference=_A , test_mean_pixel_difference=_A , ) @skip_mps def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = torch_device == '''cpu''' UpperCAmelCase__ : str = False self._test_attention_slicing_forward_pass( test_max_difference=_A , test_mean_pixel_difference=_A , )
299
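The pipeline test above follows the usual diffusers convention for deterministic tests: call torch.manual_seed(0) immediately before building each dummy sub-model so its random weights are reproducible, then compare a small output slice against hard-coded expected values with a loose tolerance. The pattern in miniature (toy module; the reference here is recomputed rather than hard-coded):

import torch
from torch import nn

def make_dummy_model():
    torch.manual_seed(0)  # identical random weights on every call
    return nn.Linear(4, 2)

out = make_dummy_model()(torch.ones(1, 4))
ref = make_dummy_model()(torch.ones(1, 4))
assert torch.abs(out - ref).max() < 1e-2  # slice-vs-reference check, as in the test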
'''simple docstring''' import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Any = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() UpperCAmelCase__ : Optional[Any] = dict(zip(_A , range(len(_A ) ) ) ) UpperCAmelCase__ : Tuple = { '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } UpperCAmelCase__ : Optional[int] = { '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 16_000, '''return_attention_mask''': False, '''do_normalize''': True, } UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp() UpperCAmelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase__ : int = os.path.join(self.tmpdirname , _A ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_A ) + '''\n''' ) with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_A ) + '''\n''' ) # load decoder from hub UpperCAmelCase__ : Any = '''hf-internal-testing/ngram-beam-search-decoder''' def lowercase_ ( self : int , **_A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = self.add_kwargs_tokens_map.copy() kwargs.update(_A ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_A ) def lowercase_ ( self : str , **_A : Any ): '''simple docstring''' return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_A ) def lowercase_ ( self : str , **_A : Any ): '''simple docstring''' return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_A ) def lowercase_ ( self : Any ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase__ : Any = self.get_feature_extractor() UpperCAmelCase__ : Tuple = self.get_decoder() UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , _A ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) 
self.assertIsInstance(processor.feature_extractor , _A ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , _A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(_A , '''include''' ): WavaVecaProcessorWithLM( tokenizer=_A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_feature_extractor() UpperCAmelCase__ : Optional[int] = self.get_tokenizer() UpperCAmelCase__ : Any = self.get_decoder() UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : List[Any] = floats_list((3, 1_000) ) UpperCAmelCase__ : Dict = feature_extractor(_A , return_tensors='''np''' ) UpperCAmelCase__ : str = processor(_A , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_feature_extractor() UpperCAmelCase__ : str = self.get_tokenizer() UpperCAmelCase__ : str = self.get_decoder() UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : Union[str, Any] = '''This is a test string''' UpperCAmelCase__ : Optional[int] = processor(text=_A ) UpperCAmelCase__ : List[str] = tokenizer(_A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase_ ( self : Dict , _A : Optional[int]=(2, 10, 16) , _A : List[str]=77 ): '''simple docstring''' np.random.seed(_A ) return np.random.rand(*_A ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_feature_extractor() UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase__ : Optional[Any] = self.get_decoder() UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : int = self._get_dummy_logits(shape=(10, 16) , seed=13 ) UpperCAmelCase__ : List[Any] = processor.decode(_A ) UpperCAmelCase__ : List[Any] = decoder.decode_beams(_A )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual('''</s> <s> </s>''' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score 
) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def lowercase_ ( self : Any , _A : str ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_feature_extractor() UpperCAmelCase__ : Tuple = self.get_tokenizer() UpperCAmelCase__ : Tuple = self.get_decoder() UpperCAmelCase__ : Any = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : Optional[Any] = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(_A ) else: with get_context(_A ).Pool() as pool: UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(_A , _A ) UpperCAmelCase__ : str = list(_A ) with get_context('''fork''' ).Pool() as p: UpperCAmelCase__ : Dict = decoder.decode_beams_batch(_A , _A ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(_A , decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text ) self.assertListEqual(_A , decoded_processor.logit_score ) self.assertListEqual(_A , decoded_processor.lm_score ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.get_feature_extractor() UpperCAmelCase__ : List[Any] = self.get_tokenizer() UpperCAmelCase__ : int = self.get_decoder() UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : str = self._get_dummy_logits() UpperCAmelCase__ : Optional[int] = 15 UpperCAmelCase__ : Dict = -2_0.0 UpperCAmelCase__ : Optional[Any] = -4.0 UpperCAmelCase__ : Union[str, Any] = processor.batch_decode( _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , ) UpperCAmelCase__ : List[Any] = decoded_processor_out.text UpperCAmelCase__ : List[str] = list(_A ) with get_context('''fork''' ).Pool() as pool: UpperCAmelCase__ : Tuple = decoder.decode_beams_batch( _A , _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , ) UpperCAmelCase__ : Optional[int] = [d[0][0] for d in decoded_decoder_out] UpperCAmelCase__ : Optional[Any] = [d[0][2] for d in decoded_decoder_out] UpperCAmelCase__ : Optional[int] = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(_A , _A ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _A ) self.assertTrue(np.array_equal(_A , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , _A , atol=1e-3 ) ) self.assertTrue(np.array_equal(_A , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , _A , atol=1e-3 ) ) def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.get_feature_extractor() UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : Dict = self.get_decoder() UpperCAmelCase__ : int = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : Optional[int] = self._get_dummy_logits() UpperCAmelCase__ : List[str] = 2.0 UpperCAmelCase__ : Union[str, Any] = 5.0 UpperCAmelCase__ : str = -2_0.0 UpperCAmelCase__ : 
Optional[int] = True UpperCAmelCase__ : Union[str, Any] = processor.batch_decode( _A , alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , ) UpperCAmelCase__ : Union[str, Any] = decoded_processor_out.text UpperCAmelCase__ : Tuple = list(_A ) decoder.reset_params( alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , ) with get_context('''fork''' ).Pool() as pool: UpperCAmelCase__ : Optional[Any] = decoder.decode_beams_batch( _A , _A , ) UpperCAmelCase__ : str = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(_A , _A ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _A ) UpperCAmelCase__ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -2_0.0 ) self.assertEqual(lm_model.score_boundary , _A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Dict = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase__ : Optional[int] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() UpperCAmelCase__ : Dict = os.listdir(_A ) UpperCAmelCase__ : Optional[Any] = ['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(_A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : str = snapshot_download('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Any = WavaVecaProcessorWithLM.from_pretrained(_A ) UpperCAmelCase__ : Optional[int] = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase__ : str = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() UpperCAmelCase__ : List[str] = os.listdir(_A ) UpperCAmelCase__ : Any = os.listdir(_A ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(_A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Dict = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Tuple = floats_list((3, 1_000) ) UpperCAmelCase__ : int = processor_wavaveca(_A , return_tensors='''np''' ) UpperCAmelCase__ : List[str] = processor_auto(_A , return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 ) UpperCAmelCase__ : Tuple = self._get_dummy_logits() UpperCAmelCase__ : List[str] = processor_wavaveca.batch_decode(_A ) UpperCAmelCase__ : int = processor_auto.batch_decode(_A ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = self.get_feature_extractor() UpperCAmelCase__ : int = self.get_tokenizer() UpperCAmelCase__ : Optional[Any] = self.get_decoder() UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , 
decoder=_A ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , ) @staticmethod def lowercase_ ( _A : Dict , _A : str ): '''simple docstring''' UpperCAmelCase__ : int = [d[key] for d in offsets] return retrieved_list def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : str = self._get_dummy_logits()[0] UpperCAmelCase__ : List[str] = processor.decode(_A , output_word_offsets=_A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_A , _A ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Dict = self._get_dummy_logits() UpperCAmelCase__ : Dict = processor.batch_decode(_A , output_word_offsets=_A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_A , _A ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(_A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def lowercase_ ( self : Optional[Any] ): '''simple docstring''' import torch UpperCAmelCase__ : Any = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_A ) UpperCAmelCase__ : Dict = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16_000 ) ) UpperCAmelCase__ : List[Any] = iter(_A ) UpperCAmelCase__ : Optional[Any] = next(_A ) UpperCAmelCase__ : Any = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) UpperCAmelCase__ : int = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train UpperCAmelCase__ : int = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values with torch.no_grad(): UpperCAmelCase__ : Dict = model(_A ).logits.cpu().numpy() UpperCAmelCase__ : int = processor.decode(logits[0] , output_word_offsets=_A ) UpperCAmelCase__ : Any = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate UpperCAmelCase__ : Any = [ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': 
d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] UpperCAmelCase__ : int = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , _A ) self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , output.text ) # output times UpperCAmelCase__ : List[Any] = torch.tensor(self.get_from_offsets(_A , '''start_time''' ) ) UpperCAmelCase__ : List[str] = torch.tensor(self.get_from_offsets(_A , '''end_time''' ) ) # fmt: off UpperCAmelCase__ : int = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] ) UpperCAmelCase__ : List[str] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] ) # fmt: on self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) ) self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) )
299
1
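The processor tests above exercise pyctcdecode's LM-boosted beam search. As a point of contrast, the baseline those beams improve on is plain greedy CTC decoding: argmax per frame, collapse repeated ids, drop the blank token. A self-contained sketch with a made-up three-token vocabulary:

import numpy as np

vocab = ["<pad>", "a", "b"]  # index 0 doubles as the CTC blank
logits = np.array([[5, 1, 0],  # per-frame scores; argmax path: <pad> <pad> a b b
                   [4, 1, 0],
                   [0, 5, 1],
                   [1, 0, 6],
                   [1, 0, 5]], dtype=float)

ids = logits.argmax(axis=-1)  # [0, 0, 1, 2, 2]
collapsed = [i for i, prev in zip(ids, np.r_[-1, ids[:-1]]) if i != prev]
text = "".join(vocab[i] for i in collapsed if i != 0)
print(text)  # "ab"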
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


UpperCamelCase__ = {
    '''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ = [
        '''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''LiltForQuestionAnswering''',
        '''LiltForSequenceClassification''',
        '''LiltForTokenClassification''',
        '''LiltModel''',
        '''LiltPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
299
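The _LazyModule indirection in the init above exists so that importing the package does not pull in torch until a model class is actually touched. The underlying mechanism can be sketched with PEP 562's module-level __getattr__; this is an illustration of the idea when placed in a package's __init__.py, not the _LazyModule implementation itself:

import importlib

_import_structure = {"modeling_lilt": ["LiltModel"]}

def __getattr__(name):  # invoked only for attributes not found normally
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            # Import the heavy submodule on first access, then hand back the symbol.
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")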
'''simple docstring'''
# Logistic Regression from scratch

# In[62]:

# In[63]:

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def a__ ( lowerCAmelCase__ ) -> List[Any]:
    return 1 / (1 + np.exp(-z))


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
    return (-y * np.log(lowerCAmelCase__ ) - (1 - y) * np.log(1 - h )).mean()


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
    UpperCAmelCase__ : str = np.dot(lowerCAmelCase__ , lowerCAmelCase__ )
    return np.sum(y * scores - np.log(1 + np.exp(lowerCAmelCase__ ) ) )


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=7_00_00 ) -> List[Any]:
    UpperCAmelCase__ : Tuple = np.zeros(x.shape[1] )
    for iterations in range(lowerCAmelCase__ ):
        UpperCAmelCase__ : List[Any] = np.dot(lowerCAmelCase__ , lowerCAmelCase__ )
        UpperCAmelCase__ : List[str] = sigmoid_function(lowerCAmelCase__ )
        UpperCAmelCase__ : int = np.dot(x.T , h - y ) / y.size
        UpperCAmelCase__ : Optional[int] = theta - alpha * gradient  # updating the weights
        UpperCAmelCase__ : Dict = np.dot(lowerCAmelCase__ , lowerCAmelCase__ )
        UpperCAmelCase__ : int = sigmoid_function(lowerCAmelCase__ )
        UpperCAmelCase__ : Tuple = cost_function(lowerCAmelCase__ , lowerCAmelCase__ )
        if iterations % 1_00 == 0:
            print(F"""loss: {j} \t""" )  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    UpperCamelCase__ = datasets.load_iris()
    UpperCamelCase__ = iris.data[:, :2]
    UpperCamelCase__ = (iris.target != 0) * 1

    UpperCamelCase__ = 0.1
    UpperCamelCase__ = logistic_reg(alpha, x, y, max_iterations=7_0_0_0_0)
    print('''theta: ''', theta)  # printing the theta i.e our weights vector

    def a__ ( lowerCAmelCase__ ) -> Dict:
        return sigmoid_function(
            np.dot(lowerCAmelCase__ , lowerCAmelCase__ )
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(1_0, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
    ((UpperCamelCase__) , (UpperCamelCase__)) = (x[:, 0].min(), x[:, 0].max())
    ((UpperCamelCase__) , (UpperCamelCase__)) = (x[:, 1].min(), x[:, 1].max())
    ((UpperCamelCase__) , (UpperCamelCase__)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
    UpperCamelCase__ = np.c_[xxa.ravel(), xxa.ravel()]
    UpperCamelCase__ = predict_prob(grid).reshape(xxa.shape)
    plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
    plt.legend()
    plt.show()
299
1
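The update rule in the row above uses the analytic gradient of the mean log-loss, X^T (sigmoid(X theta) - y) / n. A quick finite-difference check of that formula on synthetic data (toy shapes, fixed seed), which is a useful sanity test when writing gradient descent by hand:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 2))
y = (X[:, 0] + X[:, 1] > 0).astype(float)
theta = rng.normal(size=2)

def loss(t):
    h = 1 / (1 + np.exp(-X @ t))
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()

analytic = X.T @ (1 / (1 + np.exp(-X @ theta)) - y) / y.size

eps = 1e-6
bump = np.array([eps, 0.0])  # perturb only the first weight
numeric = (loss(theta + bump) - loss(theta - bump)) / (2 * eps)
print(np.isclose(analytic[0], numeric))  # True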
'''simple docstring''' import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = {'''vocab_file''': '''vocab.txt'''} UpperCamelCase__ = { '''vocab_file''': { '''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''', }, } UpperCamelCase__ = { '''openbmb/cpm-ant-10b''': 1_0_2_4, } def a__ ( lowerCAmelCase__ ) -> List[str]: UpperCAmelCase__ : Any = collections.OrderedDict() with open(lowerCAmelCase__ , '''r''' , encoding='''utf-8''' ) as reader: UpperCAmelCase__ : Tuple = reader.readlines() for index, token in enumerate(lowerCAmelCase__ ): UpperCAmelCase__ : int = token.rstrip('''\n''' ) UpperCAmelCase__ : Union[str, Any] = index return vocab class lowerCamelCase_ ( __a ): def __init__( self : List[Any] , _A : Optional[Any] , _A : Any="<unk>" , _A : Optional[Any]=200 ): '''simple docstring''' UpperCAmelCase__ : List[str] = vocab UpperCAmelCase__ : int = unk_token UpperCAmelCase__ : Optional[int] = max_input_chars_per_word def lowercase_ ( self : Optional[Any] , _A : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = list(_A ) if len(_A ) > self.max_input_chars_per_word: return [self.unk_token] UpperCAmelCase__ : Optional[Any] = 0 UpperCAmelCase__ : str = [] while start < len(_A ): UpperCAmelCase__ : Dict = len(_A ) UpperCAmelCase__ : List[Any] = None while start < end: UpperCAmelCase__ : int = ''''''.join(chars[start:end] ) if substr in self.vocab: UpperCAmelCase__ : str = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(_A ) UpperCAmelCase__ : Optional[int] = end return sub_tokens class lowerCamelCase_ ( __a ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = ['input_ids', 'attention_mask'] lowerCAmelCase__ = False def __init__( self : Dict , _A : Tuple , _A : Optional[int]="<d>" , _A : List[Any]="</d>" , _A : List[Any]="<s>" , _A : Optional[int]="</s>" , _A : Optional[int]="<pad>" , _A : Optional[Any]="<unk>" , _A : Tuple="</n>" , _A : Union[str, Any]="</_>" , _A : Union[str, Any]="left" , **_A : Dict , ): '''simple docstring''' requires_backends(self , ['''jieba'''] ) super().__init__( bod_token=_A , eod_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , unk_token=_A , line_token=_A , space_token=_A , padding_side=_A , **_A , ) UpperCAmelCase__ : Any = bod_token UpperCAmelCase__ : List[Any] = eod_token UpperCAmelCase__ : Union[str, Any] = load_vocab(_A ) UpperCAmelCase__ : List[Any] = self.encoder[space_token] UpperCAmelCase__ : List[str] = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] UpperCAmelCase__ : Optional[Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) ) UpperCAmelCase__ : Tuple = {v: k for k, v in self.encoder.items()} UpperCAmelCase__ : List[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def lowercase_ ( self : int ): '''simple docstring''' return self.encoder[self.bod_token] @property def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' return self.encoder[self.eod_token] @property def lowercase_ ( self : str ): '''simple docstring''' return self.encoder["\n"] 
@property def lowercase_ ( self : List[Any] ): '''simple docstring''' return len(self.encoder ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowercase_ ( self : Union[str, Any] , _A : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = [] for x in jieba.cut(_A , cut_all=_A ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(_A ) ) return output_tokens def lowercase_ ( self : Tuple , _A : Any , **_A : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = [i for i in token_ids if i >= 0] UpperCAmelCase__ : Union[str, Any] = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(_A , **_A ) def lowercase_ ( self : Union[str, Any] , _A : str ): '''simple docstring''' return token in self.encoder def lowercase_ ( self : Dict , _A : List[str] ): '''simple docstring''' return "".join(_A ) def lowercase_ ( self : Optional[Any] , _A : Union[str, Any] ): '''simple docstring''' return self.encoder.get(_A , self.encoder.get(self.unk_token ) ) def lowercase_ ( self : Union[str, Any] , _A : List[str] ): '''simple docstring''' return self.decoder.get(_A , self.unk_token ) def lowercase_ ( self : Tuple , _A : str , _A : Optional[str] = None ): '''simple docstring''' if os.path.isdir(_A ): UpperCAmelCase__ : Tuple = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) else: UpperCAmelCase__ : Dict = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory UpperCAmelCase__ : Optional[Any] = 0 if " " in self.encoder: UpperCAmelCase__ : Union[str, Any] = self.encoder[''' '''] del self.encoder[" "] if "\n" in self.encoder: UpperCAmelCase__ : Union[str, Any] = self.encoder['''\n'''] del self.encoder["\n"] UpperCAmelCase__ : Any = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) ) with open(_A , '''w''' , encoding='''utf-8''' ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ''' Please check that the vocabulary is not corrupted!''' ) UpperCAmelCase__ : List[Any] = token_index writer.write(token + '''\n''' ) index += 1 return (vocab_file,) def lowercase_ ( self : List[Any] , _A : List[int] , _A : List[int] = None ): '''simple docstring''' if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def lowercase_ ( self : Dict , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) if token_ids_a is not None: return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A )) return [1] + ([0] * len(_A ))
299
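The WordpieceTokenizer in the row above implements greedy longest-match-first segmentation: from each start position it tries the longest remaining substring against the vocabulary and shrinks until it finds a hit, emitting the unknown token and advancing one character when nothing matches. The core loop in isolation, with a toy vocabulary chosen for illustration:

vocab = {"un", "believ", "able", "a"}

def wordpiece(word, unk="<unk>"):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        # Shrink the candidate substring until it is in the vocab.
        while end > start and word[start:end] not in vocab:
            end -= 1
        if end == start:          # nothing matched: emit <unk>, advance one char
            tokens.append(unk)
            start += 1
        else:
            tokens.append(word[start:end])
            start = end
    return tokens

print(wordpiece("unbelievable"))   # ['un', 'believ', 'able']
print(wordpiece("unbelievablyz"))  # falls back to <unk> for unmatched characters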
'''simple docstring''' from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'new-model' if is_tf_available(): class lowerCamelCase_ ( __a ): lowerCAmelCase__ = NewModelConfig @require_tf class lowerCamelCase_ ( unittest.TestCase ): @slow def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[str] = '''bert-base-cased''' UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Dict = TFAutoModel.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : str = '''bert-base-cased''' UpperCAmelCase__ : Any = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : List[str] = TFAutoModelForPreTraining.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : int ): '''simple docstring''' for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : str = TFAutoModelForCausalLM.from_pretrained(_A ) UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = TFAutoModelForCausalLM.from_pretrained(_A , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : List[Any] ): '''simple docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[Any] = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : Optional[Any] ): '''simple docstring''' for model_name in 
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_A ) UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_A , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : Optional[int] ): '''simple docstring''' for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(_A ) UpperCAmelCase__ , UpperCAmelCase__ : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(_A , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : Any ): '''simple docstring''' for model_name in ["bert-base-uncased"]: UpperCAmelCase__ : Any = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Any = TFAutoModelForSequenceClassification.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : Any ): '''simple docstring''' for model_name in ["bert-base-uncased"]: UpperCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Dict = TFAutoModelForQuestionAnswering.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow @require_tensorflow_probability def lowercase_ ( self : Optional[int] ): '''simple docstring''' for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: UpperCAmelCase__ : List[str] = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : List[str] = TFAutoModelForTableQuestionAnswering.from_pretrained(_A ) UpperCAmelCase__ , UpperCAmelCase__ : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained( _A , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A ) self.assertIsInstance(_A , _A ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=_A ) , 14_410 ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A ) self.assertIsInstance(_A , _A ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=_A ) , 14_410 ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : int = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Any = copy.deepcopy(model.config ) UpperCAmelCase__ : Tuple = ['''FunnelBaseModel'''] UpperCAmelCase__ : int = TFAutoModel.from_config(_A ) self.assertIsInstance(_A , _A ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_A ) UpperCAmelCase__ : str = TFAutoModel.from_pretrained(_A ) self.assertIsInstance(_A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' try: AutoConfig.register('''new-model''' , _A ) UpperCAmelCase__ : List[Any] = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, 
TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(_A ): auto_class.register(_A , _A ) auto_class.register(_A , _A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A ): auto_class.register(_A , _A ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCAmelCase__ : Tuple = BertModelTester(self ).get_config() UpperCAmelCase__ : str = NewModelConfig(**tiny_config.to_dict() ) UpperCAmelCase__ : str = auto_class.from_config(_A ) self.assertIsInstance(_A , _A ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_A ) UpperCAmelCase__ : str = auto_class.from_pretrained(_A ) self.assertIsInstance(_A , _A ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def lowercase_ ( self : str ): '''simple docstring''' with self.assertRaisesRegex( _A , '''bert-base is not a local folder and is not a valid model identifier''' ): UpperCAmelCase__ : Dict = TFAutoModel.from_pretrained('''bert-base''' ) def lowercase_ ( self : Tuple ): '''simple docstring''' with self.assertRaisesRegex( _A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): UpperCAmelCase__ : int = TFAutoModel.from_pretrained(_A , revision='''aaaaaa''' ) def lowercase_ ( self : Tuple ): '''simple docstring''' with self.assertRaisesRegex( _A , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ): UpperCAmelCase__ : List[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' with self.assertRaisesRegex(_A , '''Use `from_pt=True` to load this model''' ): UpperCAmelCase__ : int = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' ) def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[str] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: UpperCAmelCase__ : Union[str, Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint UpperCAmelCase__ : Optional[Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) with RequestCounter() as counter: UpperCAmelCase__ : List[Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
299
1
'''simple docstring''' import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class lowerCamelCase_ ( unittest.TestCase ): def __init__( self : int , _A : Any , _A : List[Any]=2 , _A : Dict=56 , _A : Optional[Any]=True , _A : Union[str, Any]=True , _A : str=True , _A : Dict=True , _A : Optional[Any]=99 , _A : int=32 , _A : str=2 , _A : Optional[int]=2 , _A : List[Any]=7 , _A : Optional[Any]="gelu_new" , _A : Tuple=0.1 , _A : List[str]=0.1 , _A : Dict=512 , _A : int=16 , _A : int=2 , _A : Any=0.0_2 , _A : List[str]=4 , _A : int="block_sparse" , _A : List[str]=True , _A : str=False , _A : Optional[int]=2 , _A : Optional[int]=3 , ): '''simple docstring''' UpperCAmelCase__ : Dict = parent UpperCAmelCase__ : Any = batch_size UpperCAmelCase__ : Any = seq_length UpperCAmelCase__ : Dict = is_training UpperCAmelCase__ : List[str] = use_attention_mask UpperCAmelCase__ : Tuple = use_token_type_ids UpperCAmelCase__ : Any = use_labels UpperCAmelCase__ : Tuple = vocab_size UpperCAmelCase__ : Tuple = hidden_size UpperCAmelCase__ : int = num_hidden_layers UpperCAmelCase__ : List[str] = num_attention_heads UpperCAmelCase__ : Any = intermediate_size UpperCAmelCase__ : Tuple = hidden_act UpperCAmelCase__ : List[Any] = hidden_dropout_prob UpperCAmelCase__ : int = attention_probs_dropout_prob UpperCAmelCase__ : Dict = max_position_embeddings UpperCAmelCase__ : str = type_vocab_size UpperCAmelCase__ : Tuple = type_sequence_label_size UpperCAmelCase__ : Dict = initializer_range UpperCAmelCase__ : int = num_choices UpperCAmelCase__ : Optional[Any] = rescale_embeddings UpperCAmelCase__ : Optional[int] = attention_type UpperCAmelCase__ : Tuple = use_bias UpperCAmelCase__ : Dict = block_size UpperCAmelCase__ : Tuple = num_random_blocks def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ : List[str] = None if self.use_attention_mask: UpperCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : Tuple = None if self.use_token_type_ids: UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase__ : Optional[int] = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def lowercase_ ( self : List[str] ): '''simple docstring''' 
UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = config_and_inputs UpperCAmelCase__ : Optional[Any] = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask, } return config, inputs_dict @require_flax class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = ( ( FlaxBigBirdForCausalLM, FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) lowerCAmelCase__ = False lowerCAmelCase__ = False def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowercase_ ( self : Tuple ): '''simple docstring''' super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' super().test_hidden_states_output() @slow def lowercase_ ( self : Optional[Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: UpperCAmelCase__ : List[Any] = model_class_name.from_pretrained('''google/bigbird-roberta-base''' ) self.assertIsNotNone(_A ) def lowercase_ ( self : str ): '''simple docstring''' if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase__ : Dict = self._prepare_for_class(_A , _A ) UpperCAmelCase__ : List[str] = model_class(_A ) @jax.jit def model_jitted(_A : str , _A : List[str]=None , **_A : str ): return model(input_ids=_A , attention_mask=_A , **_A ) with self.subTest('''JIT Enabled''' ): UpperCAmelCase__ : int = model_jitted(**_A ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): UpperCAmelCase__ : Optional[int] = model_jitted(**_A ).to_tuple() self.assertEqual(len(_A ) , len(_A ) ) for jitted_output, output in zip(_A , _A ): self.assertEqual(jitted_output.shape , output.shape ) def lowercase_ ( self : Optional[int] , _A : int , _A : Union[str, Any] , _A : Optional[int] , _A : Optional[Any]=1e-5 , _A : int="outputs" , _A : Dict=None ): '''simple docstring''' if name.startswith('''outputs.attentions''' ): return else: super().check_pt_flax_outputs(_A , _A , _A , _A , _A , _A )
299
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class lowerCamelCase_ ( unittest.TestCase ): def __init__( self : List[str] , _A : List[Any] , _A : Union[str, Any]=7 , _A : List[str]=3 , _A : str=30 , _A : Tuple=400 , _A : Optional[int]=True , _A : List[str]=None , _A : int=True , _A : int=[0.5, 0.5, 0.5] , _A : Optional[int]=[0.5, 0.5, 0.5] , _A : List[Any]=True , _A : str=1 / 255 , _A : Tuple=True , ): '''simple docstring''' UpperCAmelCase__ : str = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333} UpperCAmelCase__ : Optional[Any] = parent UpperCAmelCase__ : Optional[Any] = batch_size UpperCAmelCase__ : List[str] = num_channels UpperCAmelCase__ : List[Any] = min_resolution UpperCAmelCase__ : List[str] = max_resolution UpperCAmelCase__ : Tuple = do_resize UpperCAmelCase__ : Union[str, Any] = size UpperCAmelCase__ : Dict = do_normalize UpperCAmelCase__ : Union[str, Any] = image_mean UpperCAmelCase__ : Optional[int] = image_std UpperCAmelCase__ : Dict = do_rescale UpperCAmelCase__ : Union[str, Any] = rescale_factor UpperCAmelCase__ : int = do_pad def lowercase_ ( self : Any ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def lowercase_ ( self : Any , _A : Union[str, Any] , _A : Union[str, Any]=False ): '''simple docstring''' if not batched: UpperCAmelCase__ : Optional[int] = image_inputs[0] if isinstance(_A , Image.Image ): UpperCAmelCase__ , UpperCAmelCase__ : str = image.size else: UpperCAmelCase__ , UpperCAmelCase__ : int = image.shape[1], image.shape[2] if w < h: UpperCAmelCase__ : Optional[Any] = int(self.size['''shortest_edge'''] * h / w ) UpperCAmelCase__ : List[Any] = self.size['''shortest_edge'''] elif w > h: UpperCAmelCase__ : int = self.size['''shortest_edge'''] UpperCAmelCase__ : Dict = int(self.size['''shortest_edge'''] * w / h ) else: UpperCAmelCase__ : List[str] = self.size['''shortest_edge'''] UpperCAmelCase__ : Dict = self.size['''shortest_edge'''] else: UpperCAmelCase__ : int = [] for image in image_inputs: UpperCAmelCase__ , UpperCAmelCase__ : str = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCAmelCase__ : Union[str, Any] = max(_A , key=lambda _A : item[0] )[0] UpperCAmelCase__ : Union[str, Any] = max(_A , key=lambda _A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = DetaImageProcessor if is_vision_available() else None def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = DetaImageProcessingTester(self ) @property def lowercase_ ( self : int ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''image_mean''' ) ) 
self.assertTrue(hasattr(_A , '''image_std''' ) ) self.assertTrue(hasattr(_A , '''do_normalize''' ) ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''do_rescale''' ) ) self.assertTrue(hasattr(_A , '''do_pad''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} ) self.assertEqual(image_processor.do_pad , _A ) def lowercase_ ( self : Dict ): '''simple docstring''' pass def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input UpperCAmelCase__ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ , UpperCAmelCase__ : str = self.image_processor_tester.get_expected_values(_A , batched=_A ) UpperCAmelCase__ : Union[str, Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input UpperCAmelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ : List[str] = image_processing(_A , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input UpperCAmelCase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, 
expected_width) , ) # Test batched UpperCAmelCase__ : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Any = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: UpperCAmelCase__ : str = json.loads(f.read() ) UpperCAmelCase__ : Tuple = {'''image_id''': 39_769, '''annotations''': target} # encode them UpperCAmelCase__ : Optional[int] = DetaImageProcessor() UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , return_tensors='''pt''' ) # verify pixel values UpperCAmelCase__ : Optional[int] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['''pixel_values'''].shape , _A ) UpperCAmelCase__ : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) ) # verify area UpperCAmelCase__ : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) ) # verify boxes UpperCAmelCase__ : int = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A ) UpperCAmelCase__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) ) # verify image_id UpperCAmelCase__ : str = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) ) # verify is_crowd UpperCAmelCase__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) ) # verify class_labels UpperCAmelCase__ : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) ) # verify orig_size UpperCAmelCase__ : int = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) ) # verify size UpperCAmelCase__ : int = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) ) @slow def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: UpperCAmelCase__ : int = json.loads(f.read() ) UpperCAmelCase__ : str = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target} UpperCAmelCase__ : Dict = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them UpperCAmelCase__ : Any = DetaImageProcessor(format='''coco_panoptic''' ) UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='''pt''' ) # verify pixel values UpperCAmelCase__ : str = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['''pixel_values'''].shape , _A ) UpperCAmelCase__ : Union[str, Any] = 
torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) ) # verify area UpperCAmelCase__ : Any = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) ) # verify boxes UpperCAmelCase__ : Dict = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A ) UpperCAmelCase__ : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) ) # verify image_id UpperCAmelCase__ : Optional[int] = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) ) # verify is_crowd UpperCAmelCase__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) ) # verify class_labels UpperCAmelCase__ : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) ) # verify masks UpperCAmelCase__ : Dict = 822_873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _A ) # verify orig_size UpperCAmelCase__ : str = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) ) # verify size UpperCAmelCase__ : Optional[Any] = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
299
1
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = { '''task_specific_params''': { '''summarization''': {'''length_penalty''': 1.0, '''max_length''': 128, '''min_length''': 12, '''num_beams''': 4}, '''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 142, '''min_length''': 56, '''num_beams''': 4}, '''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6}, } } UpperCAmelCase__ : Union[str, Any] = { '''task_specific_params.summarization.length_penalty''': 1.0, '''task_specific_params.summarization.max_length''': 128, '''task_specific_params.summarization.min_length''': 12, '''task_specific_params.summarization.num_beams''': 4, '''task_specific_params.summarization_cnn.length_penalty''': 2.0, '''task_specific_params.summarization_cnn.max_length''': 142, '''task_specific_params.summarization_cnn.min_length''': 56, '''task_specific_params.summarization_cnn.num_beams''': 4, '''task_specific_params.summarization_xsum.length_penalty''': 1.0, '''task_specific_params.summarization_xsum.max_length''': 62, '''task_specific_params.summarization_xsum.min_length''': 11, '''task_specific_params.summarization_xsum.num_beams''': 6, } self.assertEqual(flatten_dict(_A ) , _A ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(_A ) , x.transpose() ) ) UpperCAmelCase__ : Optional[int] = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(_A , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = np.random.randn(3 , 4 ) UpperCAmelCase__ : List[str] = torch.tensor(_A ) self.assertTrue(np.allclose(transpose(_A ) , transpose(_A ).numpy() ) ) UpperCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 , 5 ) UpperCAmelCase__ : Tuple = torch.tensor(_A ) self.assertTrue(np.allclose(transpose(_A , axes=(1, 2, 0) ) , transpose(_A , axes=(1, 2, 0) ).numpy() ) ) @require_tf def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 ) UpperCAmelCase__ : List[Any] = tf.constant(_A ) self.assertTrue(np.allclose(transpose(_A ) , transpose(_A ).numpy() ) ) UpperCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 , 5 ) UpperCAmelCase__ : List[Any] = tf.constant(_A ) self.assertTrue(np.allclose(transpose(_A , axes=(1, 2, 0) ) , transpose(_A , axes=(1, 2, 0) ).numpy() ) ) @require_flax def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 ) UpperCAmelCase__ : Union[str, Any] = jnp.array(_A ) self.assertTrue(np.allclose(transpose(_A ) , np.asarray(transpose(_A ) ) ) ) UpperCAmelCase__ : Tuple = np.random.randn(3 , 4 , 5 ) UpperCAmelCase__ : Optional[Any] = jnp.array(_A ) self.assertTrue(np.allclose(transpose(_A , axes=(1, 2, 0) ) , np.asarray(transpose(_A , axes=(1, 2, 0) ) ) ) ) def lowercase_ ( self : Optional[Any] ): '''simple 
docstring''' UpperCAmelCase__ : Any = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(_A , (4, 3) ) , np.reshape(_A , (4, 3) ) ) ) UpperCAmelCase__ : str = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(_A , (12, 5) ) , np.reshape(_A , (12, 5) ) ) ) @require_torch def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = np.random.randn(3 , 4 ) UpperCAmelCase__ : Any = torch.tensor(_A ) self.assertTrue(np.allclose(reshape(_A , (4, 3) ) , reshape(_A , (4, 3) ).numpy() ) ) UpperCAmelCase__ : int = np.random.randn(3 , 4 , 5 ) UpperCAmelCase__ : Union[str, Any] = torch.tensor(_A ) self.assertTrue(np.allclose(reshape(_A , (12, 5) ) , reshape(_A , (12, 5) ).numpy() ) ) @require_tf def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[Any] = np.random.randn(3 , 4 ) UpperCAmelCase__ : List[str] = tf.constant(_A ) self.assertTrue(np.allclose(reshape(_A , (4, 3) ) , reshape(_A , (4, 3) ).numpy() ) ) UpperCAmelCase__ : List[Any] = np.random.randn(3 , 4 , 5 ) UpperCAmelCase__ : Optional[Any] = tf.constant(_A ) self.assertTrue(np.allclose(reshape(_A , (12, 5) ) , reshape(_A , (12, 5) ).numpy() ) ) @require_flax def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = np.random.randn(3 , 4 ) UpperCAmelCase__ : Any = jnp.array(_A ) self.assertTrue(np.allclose(reshape(_A , (4, 3) ) , np.asarray(reshape(_A , (4, 3) ) ) ) ) UpperCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 , 5 ) UpperCAmelCase__ : Optional[Any] = jnp.array(_A ) self.assertTrue(np.allclose(reshape(_A , (12, 5) ) , np.asarray(reshape(_A , (12, 5) ) ) ) ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(_A ) , np.squeeze(_A ) ) ) UpperCAmelCase__ : List[Any] = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(_A , axis=2 ) , np.squeeze(_A , axis=2 ) ) ) @require_torch def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = np.random.randn(1 , 3 , 4 ) UpperCAmelCase__ : Union[str, Any] = torch.tensor(_A ) self.assertTrue(np.allclose(squeeze(_A ) , squeeze(_A ).numpy() ) ) UpperCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 ) UpperCAmelCase__ : int = torch.tensor(_A ) self.assertTrue(np.allclose(squeeze(_A , axis=2 ) , squeeze(_A , axis=2 ).numpy() ) ) @require_tf def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : Dict = np.random.randn(1 , 3 , 4 ) UpperCAmelCase__ : List[str] = tf.constant(_A ) self.assertTrue(np.allclose(squeeze(_A ) , squeeze(_A ).numpy() ) ) UpperCAmelCase__ : Dict = np.random.randn(1 , 4 , 1 , 5 ) UpperCAmelCase__ : int = tf.constant(_A ) self.assertTrue(np.allclose(squeeze(_A , axis=2 ) , squeeze(_A , axis=2 ).numpy() ) ) @require_flax def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = np.random.randn(1 , 3 , 4 ) UpperCAmelCase__ : List[str] = jnp.array(_A ) self.assertTrue(np.allclose(squeeze(_A ) , np.asarray(squeeze(_A ) ) ) ) UpperCAmelCase__ : Any = np.random.randn(1 , 4 , 1 , 5 ) UpperCAmelCase__ : Any = jnp.array(_A ) self.assertTrue(np.allclose(squeeze(_A , axis=2 ) , np.asarray(squeeze(_A , axis=2 ) ) ) ) def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Tuple = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(_A , axis=1 ) , np.expand_dims(_A , axis=1 ) ) ) @require_torch def lowercase_ ( self : Dict ): '''simple docstring''' 
UpperCAmelCase__ : Optional[int] = np.random.randn(3 , 4 ) UpperCAmelCase__ : Optional[Any] = torch.tensor(_A ) self.assertTrue(np.allclose(expand_dims(_A , axis=1 ) , expand_dims(_A , axis=1 ).numpy() ) ) @require_tf def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Tuple = np.random.randn(3 , 4 ) UpperCAmelCase__ : Tuple = tf.constant(_A ) self.assertTrue(np.allclose(expand_dims(_A , axis=1 ) , expand_dims(_A , axis=1 ).numpy() ) ) @require_flax def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = np.random.randn(3 , 4 ) UpperCAmelCase__ : Union[str, Any] = jnp.array(_A ) self.assertTrue(np.allclose(expand_dims(_A , axis=1 ) , np.asarray(expand_dims(_A , axis=1 ) ) ) )
299
'''simple docstring'''

from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def a__ ( lowerCAmelCase__ ) -> None:
    UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = analyze_text(lowerCAmelCase__ )
    UpperCAmelCase__ : List[Any] = list(''' ''' + ascii_lowercase )
    # what is our total sum of probabilities.
    UpperCAmelCase__ : str = sum(single_char_strings.values() )

    # one length string
    UpperCAmelCase__ : int = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            UpperCAmelCase__ : Optional[int] = single_char_strings[ch]
            UpperCAmelCase__ : int = my_str / all_sum
            my_fir_sum += prob * math.loga(lowerCAmelCase__ )  # entropy formula.

    # print entropy
    print(F"""{round(-1 * my_fir_sum ):.1f}""" )

    # two len string
    UpperCAmelCase__ : str = sum(two_char_strings.values() )
    UpperCAmelCase__ : Optional[Any] = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            UpperCAmelCase__ : Optional[int] = cha + chb
            if sequence in two_char_strings:
                UpperCAmelCase__ : Dict = two_char_strings[sequence]
                UpperCAmelCase__ : Optional[int] = int(lowerCAmelCase__ ) / all_sum
                my_sec_sum += prob * math.loga(lowerCAmelCase__ )

    # print second entropy
    print(F"""{round(-1 * my_sec_sum ):.1f}""" )

    # print the difference between them
    print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )


def a__ ( lowerCAmelCase__ ) -> tuple[dict, dict]:
    UpperCAmelCase__ : Union[str, Any] = Counter()  # type: ignore
    UpperCAmelCase__ : Tuple = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(lowerCAmelCase__ ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def a__ ( ) -> Tuple:
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
299
1
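The entropy snippet above computes first- and second-order Shannon entropy over a 27-symbol alphabet (space plus lowercase letters). As a cross-check, here is a minimal de-obfuscated sketch of the same calculation; the names (`shannon_entropy`, `sample`) are chosen here for illustration and do not appear in the row itself:

import math
from collections import Counter


def shannon_entropy(symbols) -> float:
    # Empirical distribution of `symbols`, then H = -sum(p * log2(p)).
    counts = Counter(symbols)
    total = sum(counts.values())
    return -sum((n / total) * math.log2(n / total) for n in counts.values())


sample = "the quick brown fox jumps over the lazy dog"
h_one = shannon_entropy(sample)  # single characters
h_two = shannon_entropy(sample[i : i + 2] for i in range(len(sample) - 1))  # overlapping pairs
print(f"{h_one:.2f} {h_two:.2f} {h_two - h_one:.2f}")

The final difference `h_two - h_one` corresponds to the snippet's third print statement: it estimates how much extra information a character carries once its predecessor is known.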
'''simple docstring''' from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name class lowerCamelCase_ ( __a , __a ): @register_to_config def __init__( self : List[Any] , _A : bool , _A : Optional[int] = None , _A : Optional[int] = None ): '''simple docstring''' super().__init__() UpperCAmelCase__ : str = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" UpperCAmelCase__ : Tuple = torch.zeros(_A , _A ) else: UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : Optional[Any] = torch.nn.Parameter(_A ) class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 def __init__( self : Any , _A : VQModel , _A : CLIPTextModel , _A : CLIPTokenizer , _A : TransformeraDModel , _A : VQDiffusionScheduler , _A : LearnedClassifierFreeSamplingEmbeddings , ): '''simple docstring''' super().__init__() self.register_modules( vqvae=_A , transformer=_A , text_encoder=_A , tokenizer=_A , scheduler=_A , learned_classifier_free_sampling_embeddings=_A , ) def lowercase_ ( self : str , _A : Any , _A : int , _A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = len(_A ) if isinstance(_A , _A ) else 1 # get prompt text embeddings UpperCAmelCase__ : int = self.tokenizer( _A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , ) UpperCAmelCase__ : List[Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCAmelCase__ : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( '''The following part of your input was truncated because CLIP can only handle sequences up to''' f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) UpperCAmelCase__ : Optional[Any] = text_input_ids[:, : self.tokenizer.model_max_length] UpperCAmelCase__ : str = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 UpperCAmelCase__ : Tuple = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_A ) # duplicate text embeddings for each generation per prompt UpperCAmelCase__ : str = prompt_embeds.repeat_interleave(_A , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: UpperCAmelCase__ : List[Any] = self.learned_classifier_free_sampling_embeddings.embeddings UpperCAmelCase__ : List[str] = negative_prompt_embeds.unsqueeze(0 ).repeat(_A , 1 , 1 ) else: UpperCAmelCase__ : Union[str, Any] = [''''''] * batch_size UpperCAmelCase__ : int = text_input_ids.shape[-1] UpperCAmelCase__ : str = self.tokenizer( _A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''pt''' , ) UpperCAmelCase__ : Union[str, Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings UpperCAmelCase__ : List[Any] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_A ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCAmelCase__ : List[Any] = negative_prompt_embeds.shape[1] UpperCAmelCase__ : Optional[int] = negative_prompt_embeds.repeat(1 , _A , 1 ) UpperCAmelCase__ : Dict = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _A , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase__ : List[str] = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self : Optional[int] , _A : Union[str, List[str]] , _A : int = 100 , _A : float = 5.0 , _A : float = 1.0 , _A : int = 1 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , _A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _A : int = 1 , ): '''simple docstring''' if isinstance(_A , _A ): UpperCAmelCase__ : Union[str, Any] = 1 elif isinstance(_A , _A ): UpperCAmelCase__ : Optional[int] = len(_A ) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(_A )}""" ) UpperCAmelCase__ : Dict = batch_size * num_images_per_prompt UpperCAmelCase__ : Optional[Any] = guidance_scale > 1.0 UpperCAmelCase__ : Optional[Any] = self._encode_prompt(_A , _A , _A ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(_A )}.""" ) # get the initial completely masked latents unless the user supplied it UpperCAmelCase__ : int = (batch_size, self.transformer.num_latent_pixels) if latents is None: UpperCAmelCase__ : Dict = self.transformer.num_vector_embeds - 1 UpperCAmelCase__ : Dict = torch.full(_A , _A ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( '''Unexpected latents value(s). All latents be valid embedding indices i.e. 
in the range 0,''' f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" ) UpperCAmelCase__ : List[Any] = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(_A , device=self.device ) UpperCAmelCase__ : Optional[Any] = self.scheduler.timesteps.to(self.device ) UpperCAmelCase__ : Optional[int] = latents for i, t in enumerate(self.progress_bar(_A ) ): # expand the sample if we are doing classifier free guidance UpperCAmelCase__ : int = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` UpperCAmelCase__ : Tuple = self.transformer(_A , encoder_hidden_states=_A , timestep=_A ).sample if do_classifier_free_guidance: UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = model_output.chunk(2 ) UpperCAmelCase__ : List[Any] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(_A , dim=1 , keepdim=_A ) UpperCAmelCase__ : Any = self.truncate(_A , _A ) # remove `log(0)`'s (`-inf`s) UpperCAmelCase__ : Dict = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase__ : List[Any] = self.scheduler.step(_A , timestep=_A , sample=_A , generator=_A ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_A , _A , _A ) UpperCAmelCase__ : Any = self.vqvae.config.vq_embed_dim UpperCAmelCase__ : Optional[Any] = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) UpperCAmelCase__ : int = self.vqvae.quantize.get_codebook_entry(_A , shape=_A ) UpperCAmelCase__ : Optional[int] = self.vqvae.decode(_A , force_not_quantize=_A ).sample UpperCAmelCase__ : Dict = (image / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase__ : int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCAmelCase__ : Optional[Any] = self.numpy_to_pil(_A ) if not return_dict: return (image,) return ImagePipelineOutput(images=_A ) def lowercase_ ( self : Tuple , _A : torch.FloatTensor , _A : float ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = torch.sort(_A , 1 , descending=_A ) UpperCAmelCase__ : Dict = torch.exp(_A ) UpperCAmelCase__ : Any = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out UpperCAmelCase__ : List[Any] = torch.full_like(keep_mask[:, 0:1, :] , _A ) UpperCAmelCase__ : Optional[Any] = torch.cat((all_true, keep_mask) , dim=1 ) UpperCAmelCase__ : Tuple = keep_mask[:, :-1, :] UpperCAmelCase__ : Optional[int] = keep_mask.gather(1 , indices.argsort(1 ) ) UpperCAmelCase__ : List[Any] = log_p_x_0.clone() UpperCAmelCase__ : Optional[Any] = -torch.inf # -inf = log(0) return rv
299
'''simple docstring'''

from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


UpperCamelCase__ = logging.get_logger(__name__)

UpperCamelCase__ = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}

UpperCamelCase__ = {
    '''vocab_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
    },
    '''tokenizer_config_file''': {
        '''facebook/blenderbot_small-90M''': (
            '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
        )
    },
}

UpperCamelCase__ = {
    '''facebook/blenderbot_small-90M''': 5_1_2,
}


class lowerCamelCase_ ( __a ):
    lowerCAmelCase__ = VOCAB_FILES_NAMES
    lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase__ = BlenderbotSmallTokenizer

    def __init__( self : List[Any] , _A : List[Any]=None , _A : Optional[Any]=None , _A : Optional[int]="<|endoftext|>" , _A : List[str]="<|endoftext|>" , _A : List[str]="<|endoftext|>" , _A : Any=False , _A : Union[str, Any]=True , **_A : Optional[int] , ):
        '''simple docstring'''
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=_A , merges=_A , add_prefix_space=_A , trim_offsets=_A , ) ,
            bos_token=_A ,
            eos_token=_A ,
            unk_token=_A ,
            **_A ,
        )
        UpperCAmelCase__ : List[Any] = add_prefix_space

    def lowercase_ ( self : str , _A : Any , _A : Any=None ):
        '''simple docstring'''
        UpperCAmelCase__ : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def lowercase_ ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None ):
        '''simple docstring'''
        UpperCAmelCase__ : Optional[int] = [self.sep_token_id]
        UpperCAmelCase__ : Tuple = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
299
1
'''simple docstring'''

from math import ceil, sqrt


def a__ ( lowerCAmelCase__ = 1_00_00_00 ) -> int:
    UpperCAmelCase__ : Optional[int] = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            UpperCAmelCase__ : Dict = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            UpperCAmelCase__ : Union[str, Any] = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer


if __name__ == "__main__":
    print(F"""{solution() = }""")
299
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = XLMRobertaTokenizer lowerCAmelCase__ = XLMRobertaTokenizerFast lowerCAmelCase__ = True lowerCAmelCase__ = True def lowercase_ ( self : Dict ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase__ : Union[str, Any] = XLMRobertaTokenizer(_A , keep_accents=_A ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = '''<pad>''' UpperCAmelCase__ : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(_A ) , 1_002 ) def lowercase_ ( self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_002 ) def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = XLMRobertaTokenizer(_A , keep_accents=_A ) UpperCAmelCase__ : int = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) UpperCAmelCase__ : Dict = tokenizer.convert_tokens_to_ids(_A ) self.assertListEqual( _A , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) UpperCAmelCase__ : Optional[int] = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def lowercase_ ( self : str ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return UpperCAmelCase__ : List[str] = (self.rust_tokenizer_class, 
'''hf-internal-testing/tiny-xlm-roberta''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A ) UpperCAmelCase__ : Optional[int] = self.tokenizer_class.from_pretrained(_A , **_A ) UpperCAmelCase__ : List[str] = tempfile.mkdtemp() UpperCAmelCase__ : Any = tokenizer_r.save_pretrained(_A ) UpperCAmelCase__ : Tuple = tokenizer_p.save_pretrained(_A ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) UpperCAmelCase__ : Optional[int] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(_A , _A ) # Checks everything loads correctly in the same way UpperCAmelCase__ : Any = tokenizer_r.from_pretrained(_A ) UpperCAmelCase__ : Dict = tokenizer_p.from_pretrained(_A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_A , _A ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(_A ) # Save tokenizer rust, legacy_format=True UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp() UpperCAmelCase__ : Union[str, Any] = tokenizer_r.save_pretrained(_A , legacy_format=_A ) UpperCAmelCase__ : List[str] = tokenizer_p.save_pretrained(_A ) # Checks it save with the same files self.assertSequenceEqual(_A , _A ) # Checks everything loads correctly in the same way UpperCAmelCase__ : List[str] = tokenizer_r.from_pretrained(_A ) UpperCAmelCase__ : List[str] = tokenizer_p.from_pretrained(_A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_A , _A ) ) shutil.rmtree(_A ) # Save tokenizer rust, legacy_format=False UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp() UpperCAmelCase__ : Dict = tokenizer_r.save_pretrained(_A , legacy_format=_A ) UpperCAmelCase__ : str = tokenizer_p.save_pretrained(_A ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way UpperCAmelCase__ : Union[str, Any] = tokenizer_r.from_pretrained(_A ) UpperCAmelCase__ : Optional[Any] = tokenizer_p.from_pretrained(_A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_A , _A ) ) shutil.rmtree(_A ) @cached_property def lowercase_ ( self : Optional[Any] ): '''simple docstring''' return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' ) def lowercase_ ( self : Any ): '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(_A , f.name ) UpperCAmelCase__ : int = XLMRobertaTokenizer(f.name , keep_accents=_A ) UpperCAmelCase__ : str = pickle.dumps(_A ) pickle.loads(_A ) def lowercase_ ( self : int ): '''simple docstring''' if not self.test_rust_tokenizer: return UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : Union[str, Any] = self.get_rust_tokenizer() UpperCAmelCase__ : Dict = '''I was born in 92000, and this is falsé.''' UpperCAmelCase__ : Dict = tokenizer.tokenize(_A ) UpperCAmelCase__ : List[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) UpperCAmelCase__ : int = 
tokenizer.encode(_A , add_special_tokens=_A ) UpperCAmelCase__ : Optional[Any] = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) UpperCAmelCase__ : Any = self.get_rust_tokenizer() UpperCAmelCase__ : List[Any] = tokenizer.encode(_A ) UpperCAmelCase__ : Union[str, Any] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) @slow def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : str = '''Hello World!''' UpperCAmelCase__ : Tuple = [0, 35_378, 6_661, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(_A , self.big_tokenizer.encode(_A ) ) @slow def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) UpperCAmelCase__ : Any = [ 0, 3_293, 83, 10, 4_552, 4_989, 7_986, 678, 10, 5_915, 111, 179_459, 124_850, 4, 6_044, 237, 12, 6, 5, 6, 4, 6_780, 705, 15, 1_388, 44, 378, 10_114, 711, 152, 20, 6, 5, 22_376, 642, 1_221, 15_190, 34_153, 450, 5_608, 959, 1_119, 57_702, 136, 186, 47, 1_098, 29_367, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6_044, 237, 6_284, 50_901, 528, 31, 90, 34, 927, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(_A , self.big_tokenizer.encode(_A ) ) @slow def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : int = {'''input_ids''': [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_A , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
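As a standalone illustration of the slow/fast parity property exercised by the test above, the following sketch compares the two XLM-RoBERTa tokenizers directly. It assumes `transformers` is installed and the `xlm-roberta-base` checkpoint is available locally or downloadable.

from transformers import XLMRobertaTokenizer, XLMRobertaTokenizerFast

slow_tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
fast_tok = XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base")

text = "I was born in 92000, and this is falsé."
# Both backends should produce identical subword pieces and ids.
assert slow_tok.tokenize(text) == fast_tok.tokenize(text)
assert slow_tok.encode(text) == fast_tok.encode(text)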
299
1
'''simple docstring'''
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
    },
    '''tokenizer_config_file''': {
        '''facebook/blenderbot_small-90M''': (
            '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/blenderbot_small-90M''': 5_1_2,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
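A small sketch of how `build_inputs_with_special_tokens` above frames its output. The token ids are made up for illustration only.

# Hypothetical token ids, chosen only to show the framing.
bos_id, eos_id = 1, 2
ids_a, ids_b = [10, 11], [20, 21]

single = [bos_id] + ids_a + [eos_id]          # <bos> A <eos>
pair = single + [eos_id] + ids_b + [eos_id]   # <bos> A <eos> <eos> B <eos>
assert pair == [1, 10, 11, 2, 2, 20, 21, 2]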
299
'''simple docstring'''
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''')
    if not scores:
        raise ValueError('''Scores cannot be empty''')
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
    height = math.log(len(scores), 2)
    print(F"""Optimal value : {minimax(0, 0, True, scores, height)}""")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
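A quick check of `minimax` above on a two-level game tree; any power-of-two score list works the same way.

scores = [3, 5, 2, 9]
height = math.log(len(scores), 2)  # a complete binary tree of height 2
# Leaves pair up as min(3, 5) = 3 and min(2, 9) = 2; the maximiser then picks 3.
assert minimax(0, 0, True, scores, height) == 3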
299
1
'''simple docstring''' import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { '''b0''': efficientnet.EfficientNetBa, '''b1''': efficientnet.EfficientNetBa, '''b2''': efficientnet.EfficientNetBa, '''b3''': efficientnet.EfficientNetBa, '''b4''': efficientnet.EfficientNetBa, '''b5''': efficientnet.EfficientNetBa, '''b6''': efficientnet.EfficientNetBa, '''b7''': efficientnet.EfficientNetBa, } UpperCamelCase__ = { '''b0''': { '''hidden_dim''': 1_2_8_0, '''width_coef''': 1.0, '''depth_coef''': 1.0, '''image_size''': 2_2_4, '''dropout_rate''': 0.2, '''dw_padding''': [], }, '''b1''': { '''hidden_dim''': 1_2_8_0, '''width_coef''': 1.0, '''depth_coef''': 1.1, '''image_size''': 2_4_0, '''dropout_rate''': 0.2, '''dw_padding''': [1_6], }, '''b2''': { '''hidden_dim''': 1_4_0_8, '''width_coef''': 1.1, '''depth_coef''': 1.2, '''image_size''': 2_6_0, '''dropout_rate''': 0.3, '''dw_padding''': [5, 8, 1_6], }, '''b3''': { '''hidden_dim''': 1_5_3_6, '''width_coef''': 1.2, '''depth_coef''': 1.4, '''image_size''': 3_0_0, '''dropout_rate''': 0.3, '''dw_padding''': [5, 1_8], }, '''b4''': { '''hidden_dim''': 1_7_9_2, '''width_coef''': 1.4, '''depth_coef''': 1.8, '''image_size''': 3_8_0, '''dropout_rate''': 0.4, '''dw_padding''': [6], }, '''b5''': { '''hidden_dim''': 2_0_4_8, '''width_coef''': 1.6, '''depth_coef''': 2.2, '''image_size''': 4_5_6, '''dropout_rate''': 0.4, '''dw_padding''': [1_3, 2_7], }, '''b6''': { '''hidden_dim''': 2_3_0_4, '''width_coef''': 1.8, '''depth_coef''': 2.6, '''image_size''': 5_2_8, '''dropout_rate''': 0.5, '''dw_padding''': [3_1], }, '''b7''': { '''hidden_dim''': 2_5_6_0, '''width_coef''': 2.0, '''depth_coef''': 3.1, '''image_size''': 6_0_0, '''dropout_rate''': 0.5, '''dw_padding''': [1_8], }, } def a__ ( lowerCAmelCase__ ) -> Tuple: UpperCAmelCase__ : List[str] = EfficientNetConfig() UpperCAmelCase__ : List[Any] = CONFIG_MAP[model_name]['''hidden_dim'''] UpperCAmelCase__ : Dict = CONFIG_MAP[model_name]['''width_coef'''] UpperCAmelCase__ : Dict = CONFIG_MAP[model_name]['''depth_coef'''] UpperCAmelCase__ : Optional[int] = CONFIG_MAP[model_name]['''image_size'''] UpperCAmelCase__ : Dict = CONFIG_MAP[model_name]['''dropout_rate'''] UpperCAmelCase__ : Optional[int] = CONFIG_MAP[model_name]['''dw_padding'''] UpperCAmelCase__ : List[str] = '''huggingface/label-files''' UpperCAmelCase__ : List[Any] = '''imagenet-1k-id2label.json''' UpperCAmelCase__ : Optional[Any] = 10_00 UpperCAmelCase__ : Any = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase__ : Optional[int] = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()} UpperCAmelCase__ : List[str] = idalabel UpperCAmelCase__ : Tuple = {v: k for k, v in idalabel.items()} return config def a__ ( ) -> int: UpperCAmelCase__ : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase__ : Dict = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ) return im def a__ ( lowerCAmelCase__ ) -> List[str]: UpperCAmelCase__ : Union[str, Any] = 
CONFIG_MAP[model_name]['''image_size'''] UpperCAmelCase__ : Any = EfficientNetImageProcessor( size={'''height''': size, '''width''': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=lowerCAmelCase__ , ) return preprocessor def a__ ( lowerCAmelCase__ ) -> List[str]: UpperCAmelCase__ : Union[str, Any] = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )] UpperCAmelCase__ : Union[str, Any] = sorted(set(lowerCAmelCase__ ) ) UpperCAmelCase__ : Dict = len(lowerCAmelCase__ ) UpperCAmelCase__ : Dict = {b: str(lowerCAmelCase__ ) for b, i in zip(lowerCAmelCase__ , range(lowerCAmelCase__ ) )} UpperCAmelCase__ : Optional[int] = [] rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') ) rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') ) rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') ) rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') ) rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') ) for b in block_names: UpperCAmelCase__ : List[str] = block_name_mapping[b] rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") ) rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") ) rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") ) rename_keys.append( (F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") ) rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") ) rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") ) rename_keys.append( (F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") ) rename_keys.append( (F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") ) rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") ) rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") ) rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") ) rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") ) rename_keys.append( (F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") ) rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") ) rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") ) rename_keys.append( (F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") ) rename_keys.append( (F"""block{b}_project_bn/moving_variance:0""", 
F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") ) rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') ) rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') ) rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') ) rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') ) rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') ) UpperCAmelCase__ : Dict = {} for item in rename_keys: if item[0] in original_param_names: UpperCAmelCase__ : Optional[Any] = '''efficientnet.''' + item[1] UpperCAmelCase__ : Optional[Any] = '''classifier.weight''' UpperCAmelCase__ : Optional[int] = '''classifier.bias''' return key_mapping def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]: for key, value in tf_params.items(): if "normalization" in key: continue UpperCAmelCase__ : List[Any] = key_mapping[key] if "_conv" in key and "kernel" in key: UpperCAmelCase__ : Any = torch.from_numpy(lowerCAmelCase__ ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: UpperCAmelCase__ : List[str] = torch.from_numpy(lowerCAmelCase__ ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: UpperCAmelCase__ : str = torch.from_numpy(np.transpose(lowerCAmelCase__ ) ) else: UpperCAmelCase__ : List[Any] = torch.from_numpy(lowerCAmelCase__ ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowerCAmelCase__ ) @torch.no_grad() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any: UpperCAmelCase__ : List[str] = model_classes[model_name]( include_top=lowerCAmelCase__ , weights='''imagenet''' , input_tensor=lowerCAmelCase__ , input_shape=lowerCAmelCase__ , pooling=lowerCAmelCase__ , classes=10_00 , classifier_activation='''softmax''' , ) UpperCAmelCase__ : Tuple = original_model.trainable_variables UpperCAmelCase__ : int = original_model.non_trainable_variables UpperCAmelCase__ : Any = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: UpperCAmelCase__ : Tuple = param.numpy() UpperCAmelCase__ : Any = list(tf_params.keys() ) # Load HuggingFace model UpperCAmelCase__ : Optional[int] = get_efficientnet_config(lowerCAmelCase__ ) UpperCAmelCase__ : Dict = EfficientNetForImageClassification(lowerCAmelCase__ ).eval() UpperCAmelCase__ : List[str] = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('''Converting parameters...''' ) UpperCAmelCase__ : Any = rename_keys(lowerCAmelCase__ ) replace_params(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Initialize preprocessor and preprocess input image UpperCAmelCase__ : List[Any] = convert_image_processor(lowerCAmelCase__ ) UpperCAmelCase__ : Tuple = preprocessor(images=prepare_img() , return_tensors='''pt''' ) # HF model inference hf_model.eval() with torch.no_grad(): UpperCAmelCase__ : List[Any] = hf_model(**lowerCAmelCase__ ) UpperCAmelCase__ : List[Any] = outputs.logits.detach().numpy() # Original model inference UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : Optional[Any] = CONFIG_MAP[model_name]['''image_size'''] UpperCAmelCase__ : Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) UpperCAmelCase__ : str = image.img_to_array(lowerCAmelCase__ ) UpperCAmelCase__ : int = np.expand_dims(lowerCAmelCase__ , axis=0 ) UpperCAmelCase__ : List[str] = original_model.predict(lowerCAmelCase__ ) # Check whether 
original and HF model outputs match -> np.allclose assert np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ), "The predicted logits are not the same." print('''Model outputs match!''' ) if save_model: # Create folder to save model if not os.path.isdir(lowerCAmelCase__ ): os.mkdir(lowerCAmelCase__ ) # Save converted model and image processor hf_model.save_pretrained(lowerCAmelCase__ ) preprocessor.save_pretrained(lowerCAmelCase__ ) if push_to_hub: # Push model and image processor to hub print(F"""Pushing converted {model_name} to the hub...""" ) UpperCAmelCase__ : int = F"""efficientnet-{model_name}""" preprocessor.push_to_hub(lowerCAmelCase__ ) hf_model.push_to_hub(lowerCAmelCase__ ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''b0''', type=str, help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''hf_model''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''') parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') UpperCamelCase__ = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
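A minimal sketch of the kernel layout conversion performed in `replace_params` above: TensorFlow stores conv kernels as (H, W, in, out) while PyTorch expects (out, in, H, W). The shapes here are arbitrary.

import numpy as np
import torch

tf_kernel = np.zeros((3, 3, 16, 32), dtype=np.float32)       # (H, W, in_ch, out_ch)
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)  # -> (out_ch, in_ch, H, W)
assert tuple(pt_kernel.shape) == (32, 16, 3, 3)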
299
'''simple docstring'''
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a Python list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception('''QUEUE IS FULL''')
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception('''UNDERFLOW''')
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
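A short usage sketch, assuming the class is named `CircularQueue` as restored above; `enqueue` returns `self`, so calls can be chained.

cq = CircularQueue(3)
cq.enqueue(1).enqueue(2)
assert len(cq) == 2
assert cq.first() == 1
assert cq.dequeue() == 1
assert cq.dequeue() == 2
assert cq.is_empty()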
299
1
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging UpperCamelCase__ = logging.get_logger(__name__) class lowerCamelCase_ ( __a ): lowerCAmelCase__ = ['input_features', 'is_longer'] def __init__( self : Any , _A : List[Any]=64 , _A : Any=48_000 , _A : Tuple=480 , _A : Optional[Any]=10 , _A : List[Any]=1_024 , _A : Optional[int]=0.0 , _A : List[str]=False , _A : float = 0 , _A : float = 14_000 , _A : int = None , _A : str = "fusion" , _A : str = "repeatpad" , **_A : Any , ): '''simple docstring''' super().__init__( feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , ) UpperCAmelCase__ : Tuple = top_db UpperCAmelCase__ : Any = truncation UpperCAmelCase__ : Dict = padding UpperCAmelCase__ : List[Any] = fft_window_size UpperCAmelCase__ : str = (fft_window_size >> 1) + 1 UpperCAmelCase__ : List[Any] = hop_length UpperCAmelCase__ : int = max_length_s UpperCAmelCase__ : List[Any] = max_length_s * sampling_rate UpperCAmelCase__ : str = sampling_rate UpperCAmelCase__ : Optional[int] = frequency_min UpperCAmelCase__ : Union[str, Any] = frequency_max UpperCAmelCase__ : str = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm=_A , mel_scale='''htk''' , ) UpperCAmelCase__ : Union[str, Any] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = copy.deepcopy(self.__dict__ ) UpperCAmelCase__ : Any = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def lowercase_ ( self : int , _A : np.array , _A : Optional[np.array] = None ): '''simple docstring''' UpperCAmelCase__ : str = spectrogram( _A , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_A , log_mel='''dB''' , ) return log_mel_spectrogram.T def lowercase_ ( self : Dict , _A : List[Any] , _A : Union[str, Any] , _A : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk UpperCAmelCase__ : List[Any] = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk UpperCAmelCase__ : List[str] = [0] # randomly choose index for each part UpperCAmelCase__ : Optional[int] = np.random.choice(ranges[0] ) UpperCAmelCase__ : Optional[int] = np.random.choice(ranges[1] ) UpperCAmelCase__ : Optional[Any] = np.random.choice(ranges[2] ) UpperCAmelCase__ : Optional[int] = mel[idx_front : idx_front + chunk_frames, :] UpperCAmelCase__ : int = mel[idx_middle : idx_middle + chunk_frames, :] UpperCAmelCase__ : Optional[Any] = mel[idx_back : idx_back + chunk_frames, :] UpperCAmelCase__ : Any = torch.tensor(mel[None, None, :] ) UpperCAmelCase__ : int = torch.nn.functional.interpolate( _A , size=[chunk_frames, 64] , 
mode='''bilinear''' , align_corners=_A ) UpperCAmelCase__ : Optional[Any] = mel_shrink[0][0].numpy() UpperCAmelCase__ : Tuple = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def lowercase_ ( self : Dict , _A : np.array , _A : Union[str, Any] , _A : Optional[Any] , _A : Any ): '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": UpperCAmelCase__ : Optional[int] = True # random crop to max_length (for compatibility) -> this should be handled by self.pad UpperCAmelCase__ : str = len(_A ) - max_length UpperCAmelCase__ : Dict = np.random.randint(0 , overflow + 1 ) UpperCAmelCase__ : Optional[Any] = waveform[idx : idx + max_length] UpperCAmelCase__ : Tuple = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :] elif truncation == "fusion": UpperCAmelCase__ : Optional[Any] = self._np_extract_fbank_features(_A , self.mel_filters ) UpperCAmelCase__ : Optional[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed UpperCAmelCase__ : List[Any] = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. UpperCAmelCase__ : int = np.stack([mel, mel, mel, mel] , axis=0 ) UpperCAmelCase__ : int = False else: UpperCAmelCase__ : Optional[Any] = self._random_mel_fusion(_A , _A , _A ) UpperCAmelCase__ : Union[str, Any] = True else: raise NotImplementedError(f"""data_truncating {truncation} not implemented""" ) else: UpperCAmelCase__ : Any = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": UpperCAmelCase__ : List[str] = int(max_length / len(_A ) ) UpperCAmelCase__ : Dict = np.stack(np.tile(_A , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": UpperCAmelCase__ : Optional[Any] = int(max_length / len(_A ) ) UpperCAmelCase__ : Any = np.stack(np.tile(_A , _A ) ) UpperCAmelCase__ : str = np.pad(_A , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 ) if truncation == "fusion": UpperCAmelCase__ : List[Any] = self._np_extract_fbank_features(_A , self.mel_filters ) UpperCAmelCase__ : str = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: UpperCAmelCase__ : Optional[int] = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : Dict , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : str = None , _A : Optional[str] = None , _A : Optional[int] = None , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , **_A : Dict , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = truncation if truncation is not None else self.truncation UpperCAmelCase__ : Optional[Any] = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input""" f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) UpperCAmelCase__ : Union[str, Any] = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) UpperCAmelCase__ : Union[str, Any] = is_batched_numpy or ( isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: UpperCAmelCase__ : Optional[Any] = [np.asarray(_A , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_A , np.ndarray ): UpperCAmelCase__ : List[Any] = np.asarray(_A , dtype=np.floataa ) elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCAmelCase__ : Tuple = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCAmelCase__ : List[Any] = [np.asarray(_A )] # convert to mel spectrogram, truncate and pad if needed. UpperCAmelCase__ : Optional[Any] = [ self._get_input_mel(_A , max_length if max_length else self.nb_max_samples , _A , _A ) for waveform in raw_speech ] UpperCAmelCase__ : Optional[Any] = [] UpperCAmelCase__ : Optional[int] = [] for mel, longer in padded_inputs: input_mel.append(_A ) is_longer.append(_A ) if truncation == "fusion" and sum(_A ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer UpperCAmelCase__ : Optional[Any] = np.random.randint(0 , len(_A ) ) UpperCAmelCase__ : List[str] = True if isinstance(input_mel[0] , _A ): UpperCAmelCase__ : int = [np.asarray(_A , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool UpperCAmelCase__ : Any = [[longer] for longer in is_longer] UpperCAmelCase__ : Optional[int] = {'''input_features''': input_mel, '''is_longer''': is_longer} UpperCAmelCase__ : int = BatchFeature(_A ) if return_tensors is not None: UpperCAmelCase__ : Tuple = input_features.convert_to_tensors(_A ) return input_features
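A toy NumPy sketch of the "repeatpad" branch above: a short waveform is tiled a whole number of times, then zero-padded up to the target length. The sample values are arbitrary.

import numpy as np

waveform = np.arange(4, dtype=np.float32)    # 4-sample toy waveform
max_length = 10
n_repeat = int(max_length / len(waveform))   # -> 2 full repeats
repeated = np.tile(waveform, n_repeat)       # length 8
padded = np.pad(repeated, (0, max_length - repeated.shape[0]),
                mode="constant", constant_values=0)
assert padded.shape == (10,) and padded[-1] == 0.0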
299
'''simple docstring'''
def selection_sort(collection: list) -> list:
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(selection_sort(unsorted))
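A few spot checks for `selection_sort` above (it sorts in place and also returns the list).

assert selection_sort([64, 25, 12, 22, 11]) == [11, 12, 22, 25, 64]
assert selection_sort([]) == []
assert selection_sort([-2, -5, -45]) == [-45, -5, -2]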
299
1
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError('''All input parameters must be positive''')
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError('''Relative densities cannot be greater than one''')
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
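A worked sanity check: at redshift 0 the density terms sum to 1 (curvature absorbs any remainder), so the function should return the Hubble constant itself.

h = hubble_parameter(
    hubble_constant=68.3,
    radiation_density=1e-4,
    matter_density=0.3,
    dark_energy=0.7,
    redshift=0,
)
assert abs(h - 68.3) < 1e-9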
299
'''simple docstring''' from collections.abc import Iterable from typing import Any class lowerCamelCase_ : def __init__( self : List[Any] , _A : int | None = None ): '''simple docstring''' UpperCAmelCase__ : List[Any] = value UpperCAmelCase__ : Node | None = None # Added in order to delete a node easier UpperCAmelCase__ : Node | None = None UpperCAmelCase__ : Node | None = None def __repr__( self : Optional[Any] ): '''simple docstring''' from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 ) class lowerCamelCase_ : def __init__( self : Optional[Any] , _A : Node | None = None ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = root def __str__( self : Union[str, Any] ): '''simple docstring''' return str(self.root ) def lowercase_ ( self : str , _A : Node , _A : Node | None ): '''simple docstring''' if new_children is not None: # reset its kids UpperCAmelCase__ : Dict = node.parent if node.parent is not None: # reset its parent if self.is_right(_A ): # If it is the right children UpperCAmelCase__ : str = new_children else: UpperCAmelCase__ : Optional[int] = new_children else: UpperCAmelCase__ : Union[str, Any] = new_children def lowercase_ ( self : Union[str, Any] , _A : Node ): '''simple docstring''' if node.parent and node.parent.right: return node == node.parent.right return False def lowercase_ ( self : int ): '''simple docstring''' return self.root is None def lowercase_ ( self : List[str] , _A : Any ): '''simple docstring''' UpperCAmelCase__ : Dict = Node(_A ) # create a new Node if self.empty(): # if Tree is empty UpperCAmelCase__ : List[Any] = new_node # set its root else: # Tree is not empty UpperCAmelCase__ : str = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: UpperCAmelCase__ : Optional[Any] = new_node # We insert the new node in a leaf break else: UpperCAmelCase__ : Any = parent_node.left else: if parent_node.right is None: UpperCAmelCase__ : str = new_node break else: UpperCAmelCase__ : List[str] = parent_node.right UpperCAmelCase__ : Tuple = parent_node def lowercase_ ( self : Optional[Any] , *_A : Tuple ): '''simple docstring''' for value in values: self.__insert(_A ) def lowercase_ ( self : Union[str, Any] , _A : int ): '''simple docstring''' if self.empty(): raise IndexError('''Warning: Tree is empty! 
please use another.''' ) else: UpperCAmelCase__ : List[Any] = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: UpperCAmelCase__ : str = node.left if value < node.value else node.right return node def lowercase_ ( self : List[Any] , _A : Node | None = None ): '''simple docstring''' if node is None: if self.root is None: return None UpperCAmelCase__ : int = self.root if not self.empty(): while node.right is not None: UpperCAmelCase__ : Tuple = node.right return node def lowercase_ ( self : List[Any] , _A : Node | None = None ): '''simple docstring''' if node is None: UpperCAmelCase__ : Optional[int] = self.root if self.root is None: return None if not self.empty(): UpperCAmelCase__ : Optional[int] = self.root while node.left is not None: UpperCAmelCase__ : Tuple = node.left return node def lowercase_ ( self : List[Any] , _A : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.search(_A ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(_A , _A ) elif node.left is None: # Has only right children self.__reassign_nodes(_A , node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(_A , node.left ) else: UpperCAmelCase__ : Union[str, Any] = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore UpperCAmelCase__ : Optional[Any] = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def lowercase_ ( self : List[str] , _A : Node | None ): '''simple docstring''' if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def lowercase_ ( self : str , _A : Any=None ): '''simple docstring''' if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def lowercase_ ( self : Dict , _A : list , _A : Node | None ): '''simple docstring''' if node: self.inorder(_A , node.left ) arr.append(node.value ) self.inorder(_A , node.right ) def lowercase_ ( self : Optional[Any] , _A : int , _A : Node ): '''simple docstring''' UpperCAmelCase__ : list[int] = [] self.inorder(_A , _A ) # append all values to list using inorder traversal return arr[k - 1] def a__ ( lowerCAmelCase__ ) -> list[Node]: UpperCAmelCase__ : Union[str, Any] = [] if curr_node is not None: UpperCAmelCase__ : str = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def a__ ( ) -> None: UpperCAmelCase__ : List[Any] = (8, 3, 6, 1, 10, 14, 13, 4, 7) UpperCAmelCase__ : str = BinarySearchTree() for i in testlist: t.insert(lowerCAmelCase__ ) # Prints all the elements of the list in order traversal print(lowerCAmelCase__ ) if t.search(6 ) is not None: print('''The value 6 exists''' ) else: print('''The value 6 doesn\'t exist''' ) if t.search(-1 ) is not None: print('''The value -1 exists''' ) else: print('''The value -1 doesn\'t exist''' ) if not t.empty(): print('''Max Value: ''' , t.get_max().value ) # type: ignore print('''Min Value: ''' , t.get_min().value ) # type: ignore for i in testlist: t.remove(lowerCAmelCase__ ) print(lowerCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
299
1
'''simple docstring'''
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(F"""{solution() = }""")
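A spot check against the Project Euler 116 statement: a row of length 5 admits 7 red (length-2), 3 green (length-3), and 2 blue (length-4) tilings, 12 in total.

assert solution(5) == 7 + 3 + 2  # == 12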
299
'''simple docstring''' import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger('''transformers.models.speecht5''') UpperCamelCase__ = { '''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''', '''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''', '''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''', '''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''', } UpperCamelCase__ = { '''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''', '''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''', } UpperCamelCase__ = { '''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''', '''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''', '''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''', '''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''', '''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''', } UpperCamelCase__ = { '''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''', '''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''', '''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''', '''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''', '''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''', '''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''', '''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''', '''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''', '''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''', '''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''', '''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''', '''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''', } UpperCamelCase__ = { '''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''', } UpperCamelCase__ = { '''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''', } UpperCamelCase__ = { '''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''', '''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''', '''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''', '''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''', '''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''', '''encoder.layers.*.fc1''': 
'''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''', '''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''', '''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''', '''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''', } UpperCamelCase__ = { '''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''', '''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''', '''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''', '''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''', '''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''', '''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''', '''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''', '''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''', '''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''', '''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''', '''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''', '''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''', '''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''', } UpperCamelCase__ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } UpperCamelCase__ = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCamelCase__ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCamelCase__ = [] UpperCamelCase__ = [ '''encoder.version''', '''encoder.layers.*.norm_k.weight''', '''encoder.layers.*.norm_k.bias''', '''decoder.version''', '''decoder.layers.*.norm_k.weight''', '''decoder.layers.*.norm_k.bias''', '''decoder.pos_emb.pe_k''', '''speech_encoder_prenet.embed_positions._float_tensor''', '''text_decoder_prenet.embed_positions._float_tensor''', ] UpperCamelCase__ = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''speech_decoder_prenet.*''', '''speech_decoder_postnet.*''', ] UpperCamelCase__ = IGNORE_KEYS + [ '''encoder.proj''', '''speech_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] UpperCamelCase__ = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: for attribute in key.split('''.''' ): UpperCAmelCase__ : Optional[int] = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) if weight_type is not None: UpperCAmelCase__ : List[str] = getattr(lowerCAmelCase__ , 
lowerCAmelCase__ ).shape else: UpperCAmelCase__ : Any = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCAmelCase__ : Union[str, Any] = value elif weight_type == "weight_g": UpperCAmelCase__ : Tuple = value elif weight_type == "weight_v": UpperCAmelCase__ : List[Any] = value elif weight_type == "bias": UpperCAmelCase__ : int = value elif weight_type == "running_mean": UpperCAmelCase__ : int = value elif weight_type == "running_var": UpperCAmelCase__ : Union[str, Any] = value elif weight_type == "num_batches_tracked": UpperCAmelCase__ : List[Any] = value else: UpperCAmelCase__ : Union[str, Any] = value logger.info(F"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]: for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." in key: UpperCAmelCase__ , UpperCAmelCase__ : int = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]: UpperCAmelCase__ : int = [] if task == "s2t": UpperCAmelCase__ : Optional[Any] = hf_model.speechta.encoder.prenet.feature_encoder UpperCAmelCase__ : List[Any] = MAPPING_S2T UpperCAmelCase__ : int = IGNORE_KEYS_S2T elif task == "t2s": UpperCAmelCase__ : List[str] = None UpperCAmelCase__ : Tuple = MAPPING_T2S UpperCAmelCase__ : Union[str, Any] = IGNORE_KEYS_T2S elif task == "s2s": UpperCAmelCase__ : Optional[int] = hf_model.speechta.encoder.prenet.feature_encoder UpperCAmelCase__ : Tuple = MAPPING_S2S UpperCAmelCase__ : int = IGNORE_KEYS_S2S else: raise ValueError(F"""Unsupported task: {task}""" ) for name, value in fairseq_dict.items(): if should_ignore(lowerCAmelCase__ , lowerCAmelCase__ ): logger.info(F"""{name} was ignored""" ) continue UpperCAmelCase__ : List[Any] = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , ) UpperCAmelCase__ : Tuple = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." 
+ mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = key.split('''.*.''' ) if prefix in name and suffix in name: UpperCAmelCase__ : List[str] = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: UpperCAmelCase__ : Optional[int] = True if "*" in mapped_key: UpperCAmelCase__ : Any = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2] UpperCAmelCase__ : Union[str, Any] = mapped_key.replace('''*''' , lowerCAmelCase__ ) if "weight_g" in name: UpperCAmelCase__ : Dict = '''weight_g''' elif "weight_v" in name: UpperCAmelCase__ : Union[str, Any] = '''weight_v''' elif "bias" in name: UpperCAmelCase__ : Optional[int] = '''bias''' elif "weight" in name: UpperCAmelCase__ : Optional[int] = '''weight''' elif "running_mean" in name: UpperCAmelCase__ : Optional[int] = '''running_mean''' elif "running_var" in name: UpperCAmelCase__ : List[Any] = '''running_var''' elif "num_batches_tracked" in name: UpperCAmelCase__ : Optional[Any] = '''num_batches_tracked''' else: UpperCAmelCase__ : Union[str, Any] = None set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) continue if not is_used: unused_weights.append(lowerCAmelCase__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str: UpperCAmelCase__ : Optional[int] = full_name.split('''conv_layers.''' )[-1] UpperCAmelCase__ : Optional[Any] = name.split('''.''' ) UpperCAmelCase__ : Any = int(items[0] ) UpperCAmelCase__ : Optional[int] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) UpperCAmelCase__ : Any = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) UpperCAmelCase__ : int = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) UpperCAmelCase__ : List[str] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) UpperCAmelCase__ : Union[str, Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowerCAmelCase__ ) @torch.no_grad() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , 
lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> Any: if config_path is not None: UpperCAmelCase__ : Optional[Any] = SpeechTaConfig.from_pretrained(lowerCAmelCase__ ) else: UpperCAmelCase__ : str = SpeechTaConfig() if task == "s2t": UpperCAmelCase__ : str = config.max_text_positions UpperCAmelCase__ : List[str] = SpeechTaForSpeechToText(lowerCAmelCase__ ) elif task == "t2s": UpperCAmelCase__ : Tuple = 18_76 UpperCAmelCase__ : int = 6_00 UpperCAmelCase__ : Union[str, Any] = config.max_speech_positions UpperCAmelCase__ : Optional[Any] = SpeechTaForTextToSpeech(lowerCAmelCase__ ) elif task == "s2s": UpperCAmelCase__ : Tuple = 18_76 UpperCAmelCase__ : Optional[Any] = config.max_speech_positions UpperCAmelCase__ : Dict = SpeechTaForSpeechToSpeech(lowerCAmelCase__ ) else: raise ValueError(F"""Unknown task name: {task}""" ) if vocab_path: UpperCAmelCase__ : Tuple = SpeechTaTokenizer(lowerCAmelCase__ , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it UpperCAmelCase__ : Dict = AddedToken('''<mask>''' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) UpperCAmelCase__ : int = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) UpperCAmelCase__ : Optional[Any] = SpeechTaFeatureExtractor() UpperCAmelCase__ : Any = SpeechTaProcessor(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ ) processor.save_pretrained(lowerCAmelCase__ ) UpperCAmelCase__ : List[str] = torch.load(lowerCAmelCase__ ) recursively_load_weights(fairseq_checkpoint['''model'''] , lowerCAmelCase__ , lowerCAmelCase__ ) model.save_pretrained(lowerCAmelCase__ ) if repo_id: print('''Pushing to the hub...''' ) processor.push_to_hub(lowerCAmelCase__ ) model.push_to_hub(lowerCAmelCase__ ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument( '''--task''', default='''s2t''', type=str, help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) UpperCamelCase__ = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
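A toy illustration of the wildcard substitution used in `recursively_load_weights` above: the layer index is recovered from the fairseq parameter name and spliced into the mapped HF name. The example name is made up.

key_suffix = "fc1"  # suffix of the MAPPING key "encoder.layers.*.fc1"
name = "encoder.layers.3.fc1.weight"  # hypothetical fairseq parameter name
layer_index = name.split(key_suffix)[0].split(".")[-2]  # -> "3"
mapped_key = "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense"
assert mapped_key.replace("*", layer_index).startswith(
    "speecht5.encoder.wrapped_encoder.layers.3."
)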
299
1
'''simple docstring'''
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_00_00_00) -> int:
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
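A spot check from the Project Euler 71 statement: among fractions with denominator at most 8, the one immediately left of 3/7 is 2/5, so the returned numerator is 2.

assert solution(numerator=3, denominator=7, limit=8) == 2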
299
'''simple docstring''' import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''label_embs_concat''': '''label_embeddings_concat''', '''mask_emb''': '''masked_spec_embed''', '''spk_proj''': '''speaker_proj''', } UpperCamelCase__ = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', '''label_embeddings_concat''', '''speaker_proj''', '''layer_norm_for_extract''', ] def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]: for attribute in key.split('''.''' ): UpperCAmelCase__ : Optional[int] = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) if weight_type is not None: UpperCAmelCase__ : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape else: UpperCAmelCase__ : Union[str, Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCAmelCase__ : int = value elif weight_type == "weight_g": UpperCAmelCase__ : Dict = value elif weight_type == "weight_v": UpperCAmelCase__ : List[str] = value elif weight_type == "bias": UpperCAmelCase__ : Tuple = value else: UpperCAmelCase__ : Tuple = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : Dict = fairseq_model.state_dict() UpperCAmelCase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase__ : Any = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , ) UpperCAmelCase__ : str = True else: for key, mapped_key in MAPPING.items(): UpperCAmelCase__ : List[str] = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue UpperCAmelCase__ : Optional[int] = True if "*" in mapped_key: UpperCAmelCase__ : str = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2] UpperCAmelCase__ : Optional[int] = mapped_key.replace('''*''' , lowerCAmelCase__ ) if "weight_g" in name: UpperCAmelCase__ : List[str] = '''weight_g''' elif "weight_v" in name: UpperCAmelCase__ : Dict = '''weight_v''' elif "bias" in name: UpperCAmelCase__ : Optional[int] = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase__ : Tuple = '''weight''' else: UpperCAmelCase__ : Optional[Any] = None set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) continue if not is_used: unused_weights.append(lowerCAmelCase__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]: UpperCAmelCase__ : Tuple = full_name.split('''conv_layers.''' )[-1] UpperCAmelCase__ : Optional[Any] = name.split('''.''' ) UpperCAmelCase__ : Union[str, Any] = int(items[0] ) UpperCAmelCase__ : Tuple = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) UpperCAmelCase__ : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) UpperCAmelCase__ : Optional[int] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) UpperCAmelCase__ : List[str] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, 
but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) UpperCAmelCase__ : Optional[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowerCAmelCase__ ) @torch.no_grad() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True ) -> Any: if config_path is not None: UpperCAmelCase__ : Any = UniSpeechSatConfig.from_pretrained(lowerCAmelCase__ ) else: UpperCAmelCase__ : int = UniSpeechSatConfig() UpperCAmelCase__ : Tuple = '''''' if is_finetuned: UpperCAmelCase__ : Optional[int] = UniSpeechSatForCTC(lowerCAmelCase__ ) else: UpperCAmelCase__ : List[Any] = UniSpeechSatForPreTraining(lowerCAmelCase__ ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) UpperCAmelCase__ : Union[str, Any] = model[0].eval() recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ ) hf_wavavec.save_pretrained(lowerCAmelCase__ ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) UpperCamelCase__ = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
299
1
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        # minimum cost to cover every travel day from `index` through day 365
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        # buy a 1-day, 7-day or 30-day pass on this travel day, whichever is cheapest overall
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
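A short usage sketch for `mincost_tickets` above; for this classic input the minimum cost is 11 (a 7-day pass bought on day 1 covers days 1, 4, 6 and 7, and two 1-day passes cover days 8 and 20):

print(mincost_tickets(days=[1, 4, 6, 7, 8, 20], costs=[2, 7, 15]))  # -> 11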
299
'''simple docstring''' import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin UpperCamelCase__ = random.Random() if is_torch_available(): import torch def a__ ( lowerCAmelCase__ , lowerCAmelCase__=1.0 , lowerCAmelCase__=None , lowerCAmelCase__=None ) -> Optional[Any]: if rng is None: UpperCAmelCase__ : List[str] = global_rng UpperCAmelCase__ : Optional[Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class lowerCamelCase_ ( unittest.TestCase ): def __init__( self : Any , _A : List[str] , _A : int=7 , _A : Dict=400 , _A : Tuple=2_000 , _A : Optional[int]=1 , _A : List[Any]=0.0 , _A : Any=16_000 , _A : int=True , _A : str=True , ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = parent UpperCAmelCase__ : str = batch_size UpperCAmelCase__ : Dict = min_seq_length UpperCAmelCase__ : str = max_seq_length UpperCAmelCase__ : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCAmelCase__ : Optional[Any] = feature_size UpperCAmelCase__ : int = padding_value UpperCAmelCase__ : int = sampling_rate UpperCAmelCase__ : Tuple = return_attention_mask UpperCAmelCase__ : str = do_normalize def lowercase_ ( self : Optional[int] ): '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowercase_ ( self : int , _A : Optional[Any]=False , _A : Any=False ): '''simple docstring''' def _flatten(_A : Union[str, Any] ): return list(itertools.chain(*_A ) ) if equal_length: UpperCAmelCase__ : Tuple = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size UpperCAmelCase__ : Optional[int] = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: UpperCAmelCase__ : Dict = [np.asarray(_A ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = ASTFeatureExtractor def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : int = ASTFeatureExtractionTester(self ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 UpperCAmelCase__ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] UpperCAmelCase__ : List[Any] = [np.asarray(_A ) for speech_input in speech_inputs] # Test not batched input UpperCAmelCase__ : str = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values UpperCAmelCase__ : List[Any] = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) ) # Test batched UpperCAmelCase__ : Optional[Any] = feat_extract(_A , padding=_A , return_tensors='''np''' ).input_values UpperCAmelCase__ : Optional[int] = feat_extract(_A , padding=_A , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_A , _A ): 
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. UpperCAmelCase__ : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)] UpperCAmelCase__ : Any = np.asarray(_A ) UpperCAmelCase__ : int = feat_extract(_A , return_tensors='''np''' ).input_values UpperCAmelCase__ : List[str] = feat_extract(_A , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_A , _A ): self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) ) @require_torch def lowercase_ ( self : List[str] ): '''simple docstring''' import torch UpperCAmelCase__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCAmelCase__ : Any = np.random.rand(100 ).astype(np.floataa ) UpperCAmelCase__ : int = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: UpperCAmelCase__ : str = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) UpperCAmelCase__ : Any = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def lowercase_ ( self : int , _A : List[Any] ): '''simple docstring''' from datasets import load_dataset UpperCAmelCase__ : Tuple = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech UpperCAmelCase__ : List[Any] = ds.sort('''id''' ).select(range(_A ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] @require_torch def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Any = torch.tensor( [-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6, -1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3, -1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6, -0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] ) # fmt: on UpperCAmelCase__ : Optional[Any] = self._load_datasamples(1 ) UpperCAmelCase__ : Optional[int] = ASTFeatureExtractor() UpperCAmelCase__ : Dict = feature_extractor(_A , return_tensors='''pt''' ).input_values self.assertEquals(input_values.shape , (1, 1_024, 128) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , _A , atol=1e-4 ) )
299
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass  # RoCBert has no fast tokenizer to register

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        pass  # no fast tokenizer to import

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
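The module above defers its heavy imports through transformers' internal `_LazyModule`, which only imports a submodule once one of its attributes is first accessed. A rough, hypothetical sketch of that idea, not the library's actual implementation:

import importlib
import types


class LazySketchModule(types.ModuleType):
    """Minimal stand-in showing how attribute access can trigger a deferred import."""

    def __init__(self, name: str, import_structure: dict[str, list[str]]):
        super().__init__(name)
        # map every exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        # only called when normal attribute lookup fails, i.e. on first access
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        return getattr(submodule, attr)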
299
'''simple docstring''' import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = 0 @slow def lowercase_ ( self : Dict ): '''simple docstring''' for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(_A ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(_A ) , 0 ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 20 ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Tuple = AutoConfig.from_pretrained(_A ) self.assertIsInstance(_A , _A ) # Check that tokenizer_type ≠ model_type UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(_A , config=_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def lowercase_ ( self : str ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_A , '''vocab.txt''' ) ) UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(_A , tokenizer_type='''bert''' , use_fast=_A ) self.assertIsInstance(_A , _A ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_A , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_A , '''merges.txt''' ) ) UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A , tokenizer_type='''gpt2''' , use_fast=_A ) self.assertIsInstance(_A , _A ) @require_tokenizers def lowercase_ ( self : str ): '''simple docstring''' 
with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_A , '''vocab.txt''' ) ) UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(_A , tokenizer_type='''bert''' ) self.assertIsInstance(_A , _A ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_A , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_A , '''merges.txt''' ) ) UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(_A , tokenizer_type='''gpt2''' ) self.assertIsInstance(_A , _A ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' with pytest.raises(_A ): AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' ) @require_tokenizers def lowercase_ ( self : int ): '''simple docstring''' for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: UpperCAmelCase__ : Optional[int] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) if isinstance(_A , _A ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _A ) else: self.assertEqual(tokenizer.do_lower_case , _A ) self.assertEqual(tokenizer.model_max_length , 512 ) @require_tokenizers def lowercase_ ( self : List[str] ): '''simple docstring''' for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( _A , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ): UpperCAmelCase__ : Dict = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TOKENIZER_MAPPING.values() UpperCAmelCase__ : Any = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(_A ) @require_tokenizers def lowercase_ ( self : Optional[int] ): '''simple docstring''' self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=_A ) , _A ) self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , _A ) @require_tokenizers def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : int = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=_A ) UpperCAmelCase__ : Any = '''Hello, world. 
How are you?''' UpperCAmelCase__ : Dict = tokenizer.tokenize(_A ) self.assertEqual('''[UNK]''' , tokens[0] ) UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=_A ) UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize(_A ) self.assertEqual('''[UNK]''' , tokens[0] ) @require_tokenizers def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : List[str] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' ) self.assertEqual(type(_A ) , _A ) self.assertEqual(tokenizer.model_max_length , 512 ) self.assertEqual(tokenizer.vocab_size , 30_000 ) self.assertEqual(tokenizer.unk_token , '''[UNK]''' ) self.assertEqual(tokenizer.padding_side , '''right''' ) self.assertEqual(tokenizer.truncation_side , '''right''' ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 12 ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''ctrl''' ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(_A , _A ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : str = get_tokenizer_config('''bert-base-cased''' ) UpperCAmelCase__ : Optional[int] = config.pop('''_commit_hash''' , _A ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(_A , {'''do_lower_case''': False} ) # This model does not have a tokenizer_config so we get back an empty dict. UpperCAmelCase__ : Tuple = get_tokenizer_config(_A ) self.assertDictEqual(_A , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = get_tokenizer_config(_A ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' ) def lowercase_ ( self : Dict ): '''simple docstring''' try: AutoConfig.register('''custom''' , _A ) AutoTokenizer.register(_A , slow_tokenizer_class=_A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A ): AutoTokenizer.register(_A , slow_tokenizer_class=_A ) UpperCAmelCase__ : Optional[int] = CustomTokenizer.from_pretrained(_A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , _A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def lowercase_ ( self : Any ): '''simple docstring''' try: AutoConfig.register('''custom''' , _A ) # Can register in two steps AutoTokenizer.register(_A , slow_tokenizer_class=_A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(_A , fast_tokenizer_class=_A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( _A , slow_tokenizer_class=_A , fast_tokenizer_class=_A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A ): AutoTokenizer.register(_A , fast_tokenizer_class=_A ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase__ : Any = BertTokenizerFast.from_pretrained(_A ) bert_tokenizer.save_pretrained(_A ) UpperCAmelCase__ : Optional[int] = CustomTokenizerFast.from_pretrained(_A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(_A , use_fast=_A ) self.assertIsInstance(_A , _A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowercase_ ( self : Optional[int] ): '''simple docstring''' with self.assertRaises(_A ): UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(_A ): UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A , use_fast=_A ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) @require_tokenizers def lowercase_ ( self : int ): '''simple docstring''' class lowerCamelCase_ ( __a ): lowerCAmelCase__ = False class lowerCamelCase_ ( __a ): lowerCAmelCase__ = NewTokenizer lowerCAmelCase__ = False try: AutoConfig.register('''custom''' , _A ) AutoTokenizer.register(_A , slow_tokenizer_class=_A ) AutoTokenizer.register(_A , fast_tokenizer_class=_A ) # If remote code is not set, the default is to use local UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) UpperCAmelCase__ : str = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertTrue(tokenizer.special_attribute_present ) UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version UpperCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A , use_fast=_A ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def lowercase_ ( self : Tuple ): '''simple docstring''' with self.assertRaisesRegex( _A , '''bert-base is not a local folder and is not a valid model identifier''' ): UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' ) def lowercase_ ( self : Dict ): '''simple docstring''' with self.assertRaisesRegex( _A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A , revision='''aaaaaa''' ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
299
1
299
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")

    curvature = 1 - (matter_density + radiation_density + dark_energy)

    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )

    return hubble_constant * e_2 ** (1 / 2)


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
299
1
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
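A tiny self-contained illustration of the segment-id layout that `create_token_type_ids_from_sequences` above produces (dummy token ids, not real vocabulary entries):

cls, sep = [101], [102]
token_ids_0, token_ids_1 = [7, 8, 9], [11, 12]
type_ids = len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
print(type_ids)  # [0, 0, 0, 0, 0, 1, 1, 1]: zeros for the first segment, ones for the second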
299
'''simple docstring''' import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin UpperCamelCase__ = logging.get_logger(__name__) enable_full_determinism() class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = UNetaDModel lowerCAmelCase__ = 'sample' @property def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 4 UpperCAmelCase__ : str = 3 UpperCAmelCase__ : str = (32, 32) UpperCAmelCase__ : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : Tuple = torch.tensor([10] ).to(_A ) return {"sample": noise, "timestep": time_step} @property def lowercase_ ( self : int ): '''simple docstring''' return (3, 32, 32) @property def lowercase_ ( self : Dict ): '''simple docstring''' return (3, 32, 32) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = { '''block_out_channels''': (32, 64), '''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''), '''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''), '''attention_head_dim''': 3, '''out_channels''': 3, '''in_channels''': 3, '''layers_per_block''': 2, '''sample_size''': 32, } UpperCAmelCase__ : Tuple = self.dummy_input return init_dict, inputs_dict class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = UNetaDModel lowerCAmelCase__ = 'sample' @property def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = 4 UpperCAmelCase__ : Dict = 4 UpperCAmelCase__ : List[str] = (32, 32) UpperCAmelCase__ : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : List[Any] = torch.tensor([10] ).to(_A ) return {"sample": noise, "timestep": time_step} @property def lowercase_ ( self : Tuple ): '''simple docstring''' return (4, 32, 32) @property def lowercase_ ( self : List[str] ): '''simple docstring''' return (4, 32, 32) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[str] = { '''sample_size''': 32, '''in_channels''': 4, '''out_channels''': 4, '''layers_per_block''': 2, '''block_out_channels''': (32, 64), '''attention_head_dim''': 32, '''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''), '''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''), } UpperCAmelCase__ : Optional[Any] = self.dummy_input return init_dict, inputs_dict def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(_A ) UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Any = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A ) model.to(_A ) UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' ) def lowercase_ ( 
self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A ) model_accelerate.to(_A ) model_accelerate.eval() UpperCAmelCase__ : Tuple = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) UpperCAmelCase__ : Union[str, Any] = noise.to(_A ) UpperCAmelCase__ : Optional[Any] = torch.tensor([10] * noise.shape[0] ).to(_A ) UpperCAmelCase__ : Any = model_accelerate(_A , _A )['''sample'''] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() UpperCAmelCase__ , UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained( '''fusing/unet-ldm-dummy-update''' , output_loading_info=_A , low_cpu_mem_usage=_A ) model_normal_load.to(_A ) model_normal_load.eval() UpperCAmelCase__ : Optional[int] = model_normal_load(_A , _A )['''sample'''] assert torch_all_close(_A , _A , rtol=1e-3 ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' ) model.eval() model.to(_A ) UpperCAmelCase__ : Union[str, Any] = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) UpperCAmelCase__ : str = noise.to(_A ) UpperCAmelCase__ : str = torch.tensor([10] * noise.shape[0] ).to(_A ) with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample UpperCAmelCase__ : List[Any] = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off UpperCAmelCase__ : Tuple = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] ) # fmt: on self.assertTrue(torch_all_close(_A , _A , rtol=1e-3 ) ) class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = UNetaDModel lowerCAmelCase__ = 'sample' @property def lowercase_ ( self : Any , _A : str=(32, 32) ): '''simple docstring''' UpperCAmelCase__ : Tuple = 4 UpperCAmelCase__ : List[str] = 3 UpperCAmelCase__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : Dict = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_A ) return {"sample": noise, "timestep": time_step} @property def lowercase_ ( self : List[str] ): '''simple docstring''' return (3, 32, 32) @property def lowercase_ ( self : List[Any] ): '''simple docstring''' return (3, 32, 32) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[str] = { '''block_out_channels''': [32, 64, 64, 64], '''in_channels''': 3, '''layers_per_block''': 1, '''out_channels''': 3, '''time_embedding_type''': '''fourier''', '''norm_eps''': 1e-6, '''mid_block_scale_factor''': math.sqrt(2.0 ), '''norm_num_groups''': None, '''down_block_types''': [ '''SkipDownBlock2D''', '''AttnSkipDownBlock2D''', '''SkipDownBlock2D''', '''SkipDownBlock2D''', ], '''up_block_types''': [ '''SkipUpBlock2D''', '''SkipUpBlock2D''', '''AttnSkipUpBlock2D''', '''SkipUpBlock2D''', ], } UpperCAmelCase__ : Tuple = self.dummy_input return init_dict, inputs_dict @slow def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : str = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_A ) self.assertIsNotNone(_A ) 
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(_A ) UpperCAmelCase__ : List[str] = self.dummy_input UpperCAmelCase__ : Dict = floats_tensor((4, 3) + (256, 256) ).to(_A ) UpperCAmelCase__ : Optional[Any] = noise UpperCAmelCase__ : Any = model(**_A ) assert image is not None, "Make sure output is not None" @slow def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' ) model.to(_A ) UpperCAmelCase__ : Optional[Any] = 4 UpperCAmelCase__ : List[str] = 3 UpperCAmelCase__ : Dict = (256, 256) UpperCAmelCase__ : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : Union[str, Any] = torch.tensor(batch_size * [1e-4] ).to(_A ) with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample UpperCAmelCase__ : Any = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off UpperCAmelCase__ : Tuple = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] ) # fmt: on self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' ) model.to(_A ) UpperCAmelCase__ : str = 4 UpperCAmelCase__ : Any = 3 UpperCAmelCase__ : int = (32, 32) UpperCAmelCase__ : Optional[Any] = torch.ones((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : Optional[Any] = torch.tensor(batch_size * [1e-4] ).to(_A ) with torch.no_grad(): UpperCAmelCase__ : int = model(_A , _A ).sample UpperCAmelCase__ : Dict = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off UpperCAmelCase__ : Any = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] ) # fmt: on self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) ) def lowercase_ ( self : Tuple ): '''simple docstring''' pass
299
1
'''simple docstring''' import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''): raise Exception('''requires fairseq >= 1.0.0a''') logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = '''Hello world! cécé herlolip''' def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: UpperCAmelCase__ : Optional[Any] = FairseqRobertaModel.from_pretrained(lowerCAmelCase__ ) roberta.eval() # disable dropout UpperCAmelCase__ : List[str] = roberta.model.encoder.sentence_encoder UpperCAmelCase__ : List[str] = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , ) if classification_head: UpperCAmelCase__ : Optional[int] = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0] print('''Our RoBERTa config:''' , lowerCAmelCase__ ) UpperCAmelCase__ : List[Any] = XLMRobertaXLForSequenceClassification(lowerCAmelCase__ ) if classification_head else XLMRobertaXLForMaskedLM(lowerCAmelCase__ ) model.eval() # Now let's copy all the weights. # Embeddings UpperCAmelCase__ : List[str] = roberta_sent_encoder.embed_tokens.weight UpperCAmelCase__ : Any = roberta_sent_encoder.embed_positions.weight UpperCAmelCase__ : str = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
UpperCAmelCase__ : str = roberta_sent_encoder.layer_norm.weight UpperCAmelCase__ : Optional[int] = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer UpperCAmelCase__ : BertLayer = model.roberta.encoder.layer[i] UpperCAmelCase__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i] UpperCAmelCase__ : RobertaAttention = layer.attention UpperCAmelCase__ : Any = roberta_layer.self_attn_layer_norm.weight UpperCAmelCase__ : List[Any] = roberta_layer.self_attn_layer_norm.bias # self attention UpperCAmelCase__ : BertSelfAttention = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) UpperCAmelCase__ : Tuple = roberta_layer.self_attn.q_proj.weight UpperCAmelCase__ : Tuple = roberta_layer.self_attn.q_proj.bias UpperCAmelCase__ : int = roberta_layer.self_attn.k_proj.weight UpperCAmelCase__ : Optional[int] = roberta_layer.self_attn.k_proj.bias UpperCAmelCase__ : Tuple = roberta_layer.self_attn.v_proj.weight UpperCAmelCase__ : Dict = roberta_layer.self_attn.v_proj.bias # self-attention output UpperCAmelCase__ : BertSelfOutput = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape UpperCAmelCase__ : Tuple = roberta_layer.self_attn.out_proj.weight UpperCAmelCase__ : int = roberta_layer.self_attn.out_proj.bias # this one is final layer norm UpperCAmelCase__ : Any = roberta_layer.final_layer_norm.weight UpperCAmelCase__ : Union[str, Any] = roberta_layer.final_layer_norm.bias # intermediate UpperCAmelCase__ : BertIntermediate = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape UpperCAmelCase__ : Any = roberta_layer.fca.weight UpperCAmelCase__ : List[str] = roberta_layer.fca.bias # output UpperCAmelCase__ : BertOutput = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape UpperCAmelCase__ : Optional[int] = roberta_layer.fca.weight UpperCAmelCase__ : Union[str, Any] = roberta_layer.fca.bias # end of layer if classification_head: UpperCAmelCase__ : List[Any] = roberta.model.classification_heads['''mnli'''].dense.weight UpperCAmelCase__ : Any = roberta.model.classification_heads['''mnli'''].dense.bias UpperCAmelCase__ : Dict = roberta.model.classification_heads['''mnli'''].out_proj.weight UpperCAmelCase__ : Dict = roberta.model.classification_heads['''mnli'''].out_proj.bias else: # LM Head UpperCAmelCase__ : Optional[Any] = roberta.model.encoder.lm_head.dense.weight UpperCAmelCase__ : Any = roberta.model.encoder.lm_head.dense.bias UpperCAmelCase__ : Optional[int] = roberta.model.encoder.lm_head.layer_norm.weight UpperCAmelCase__ : str = roberta.model.encoder.lm_head.layer_norm.bias UpperCAmelCase__ : List[str] = roberta.model.encoder.lm_head.weight UpperCAmelCase__ : Tuple = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
UpperCAmelCase__ : torch.Tensor = roberta.encode(lowerCAmelCase__ ).unsqueeze(0 ) # batch of size 1 UpperCAmelCase__ : Tuple = model(lowerCAmelCase__ )[0] if classification_head: UpperCAmelCase__ : str = roberta.model.classification_heads['''mnli'''](roberta.extract_features(lowerCAmelCase__ ) ) else: UpperCAmelCase__ : List[str] = roberta.model(lowerCAmelCase__ )[0] print(our_output.shape , their_output.shape ) UpperCAmelCase__ : Optional[Any] = torch.max(torch.abs(our_output - their_output ) ).item() print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7 UpperCAmelCase__ : Dict = torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' ) if not success: raise Exception('''Something went wRoNg''' ) pathlib.Path(lowerCAmelCase__ ).mkdir(parents=lowerCAmelCase__ , exist_ok=lowerCAmelCase__ ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowerCAmelCase__ ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.''' ) UpperCamelCase__ = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
299
from __future__ import annotations


def fractional_knapsack(
    value: list[float], weight: list[float], capacity: float
) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # consider items in order of decreasing value-to-weight ratio
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
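A short worked example for `fractional_knapsack` above, with classic textbook numbers: the items worth 60 and 100 are taken whole, two thirds of the item worth 120 fills the remaining capacity, and the total is 240.

best, taken = fractional_knapsack(value=[60, 100, 120], weight=[10, 20, 30], capacity=50)
print(best)   # 240.0
print(taken)  # [1, 1, 0.6666666666666666]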
299
1
'''simple docstring''' from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCamelCase__ = '''\ @misc{wu2016googles, title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } ''' UpperCamelCase__ = '''\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the \'GLEU score\'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score\'s range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. ''' UpperCamelCase__ = '''\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: \'google_bleu\': google_bleu score Examples: Example 1: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... 
\'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results["google_bleu"], 2)) 0.44 Example 2: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results["google_bleu"], 2)) 0.61 Example 3: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results["google_bleu"], 2)) 0.53 Example 4: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... 
\'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results["google_bleu"], 2)) 0.4 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): def lowercase_ ( self : Optional[int] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ), } ) , ) def lowercase_ ( self : Optional[Any] , _A : List[List[List[str]]] , _A : List[List[str]] , _A : int = 1 , _A : int = 4 , ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=_A , hypotheses=_A , min_len=_A , max_len=_A ) }
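The `compute` method above is a thin wrapper over NLTK, so the same score can be obtained by calling `gleu_score.corpus_gleu` directly. A small sketch with illustrative tokens:

from nltk.translate import gleu_score

hypothesis = ["he", "read", "the", "book"]
reference = ["he", "read", "a", "book"]

# one list of references per hypothesis; n-gram orders 1..4 by default
score = gleu_score.corpus_gleu(
    list_of_references=[[reference]], hypotheses=[hypothesis], min_len=1, max_len=4
)
print(round(score, 2))  # 0.4 -- 4 matching n-grams out of 10 on each side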
299
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : int , *_A : Tuple , **_A : Union[str, Any] ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : Union[str, Any] , *_A : List[Any] , **_A : Any ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : int , *_A : Union[str, Any] , **_A : Optional[Any] ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : Any , *_A : List[str] , **_A : Tuple ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : Tuple , *_A : Tuple , **_A : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : List[Any] , *_A : List[str] , **_A : Dict ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : Dict , *_A : Any , **_A : int ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : List[Any] , *_A : List[Any] , **_A : Optional[int] ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : int , *_A : Dict , **_A : Optional[Any] ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *_A : Optional[int] , **_A : Optional[Any] ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : Dict , *_A : Any , **_A : Tuple ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : int , *_A : Union[str, Any] , **_A : Dict ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *_A : Optional[int] , **_A : Dict ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : Dict , *_A : str , **_A : Dict ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : Union[str, Any] , *_A : Optional[int] , **_A : int ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : Optional[Any] , *_A : Union[str, Any] , **_A : Dict ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : List[str] , *_A : str , **_A : List[str] ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', 
'''onnx'''] ) @classmethod def lowercase_ ( cls : Dict , *_A : str , **_A : Any ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
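Every placeholder above follows the same pattern: a `DummyObject` metaclass plus a backends list (the class attribute mangled to `lowerCAmelCase__` here is `_backends` upstream), so touching the class without `torch`, `transformers`, and `onnx` installed raises an informative `ImportError`. A readable sketch with a hypothetical class name, assuming the relative import resolves to `diffusers.utils`:

from diffusers.utils import DummyObject, requires_backends


class OnnxPipelinePlaceholder(metaclass=DummyObject):  # hypothetical name
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])


# Instantiating the placeholder without the three backends installed raises
# an ImportError whose message names the missing packages.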
299
1
'''simple docstring''' import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCamelCase__ = 1_6 UpperCamelCase__ = 3_2 def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 16 ) -> Dict: UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''bert-base-cased''' ) UpperCAmelCase__ : str = DatasetDict( { '''train''': dataset['''train'''].select(lowerCAmelCase__ ), '''validation''': dataset['''train'''].select(lowerCAmelCase__ ), '''test''': dataset['''validation'''], } ) def tokenize_function(lowerCAmelCase__ ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase__ : Optional[int] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCAmelCase__ : Dict = datasets.map( lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase__ : int = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(lowerCAmelCase__ ): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCAmelCase__ : Optional[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCAmelCase__ : Any = 16 elif accelerator.mixed_precision != "no": UpperCAmelCase__ : Dict = 8 else: UpperCAmelCase__ : List[Any] = None return tokenizer.pad( lowerCAmelCase__ , padding='''longest''' , max_length=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_tensors='''pt''' , ) # Instantiate dataloaders. 
UpperCAmelCase__ : List[Any] = DataLoader( tokenized_datasets['''train'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ ) UpperCAmelCase__ : List[str] = DataLoader( tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ ) UpperCAmelCase__ : List[Any] = DataLoader( tokenized_datasets['''test'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ ) return train_dataloader, eval_dataloader, test_dataloader def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str: # New Code # UpperCAmelCase__ : List[str] = [] # Download the dataset UpperCAmelCase__ : Union[str, Any] = load_dataset('''glue''' , '''mrpc''' ) # Create our splits UpperCAmelCase__ : str = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator UpperCAmelCase__ : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase__ : Any = config['''lr'''] UpperCAmelCase__ : Any = int(config['''num_epochs'''] ) UpperCAmelCase__ : Any = int(config['''seed'''] ) UpperCAmelCase__ : Dict = int(config['''batch_size'''] ) UpperCAmelCase__ : Any = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation UpperCAmelCase__ : Optional[Any] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: UpperCAmelCase__ : Any = batch_size // MAX_GPU_BATCH_SIZE UpperCAmelCase__ : List[Any] = MAX_GPU_BATCH_SIZE set_seed(lowerCAmelCase__ ) # New Code # # Create our folds: UpperCAmelCase__ : Union[str, Any] = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] ) UpperCAmelCase__ : Dict = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(lowerCAmelCase__ ): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = get_fold_dataloaders( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase__ : List[str] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowerCAmelCase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCAmelCase__ : Optional[Any] = model.to(accelerator.device ) # Instantiate optimizer UpperCAmelCase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=lowerCAmelCase__ ) # Instantiate scheduler UpperCAmelCase__ : Any = get_linear_schedule_with_warmup( optimizer=lowerCAmelCase__ , num_warmup_steps=1_00 , num_training_steps=(len(lowerCAmelCase__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = accelerator.prepare( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Now we train the model for epoch in range(lowerCAmelCase__ ): model.train() for step, batch in enumerate(lowerCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) UpperCAmelCase__ : Union[str, Any] = model(**lowerCAmelCase__ ) UpperCAmelCase__ : Dict = outputs.loss UpperCAmelCase__ : Dict = loss / gradient_accumulation_steps accelerator.backward(lowerCAmelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase__ : str = model(**lowerCAmelCase__ ) UpperCAmelCase__ : Any = outputs.logits.argmax(dim=-1 ) UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , ) UpperCAmelCase__ : str = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , lowerCAmelCase__ ) # New Code # # We also run predictions on the test set at the very end UpperCAmelCase__ : int = [] for step, batch in enumerate(lowerCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase__ : str = model(**lowerCAmelCase__ ) UpperCAmelCase__ : Union[str, Any] = outputs.logits UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(lowerCAmelCase__ , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: UpperCAmelCase__ : Union[str, Any] = torch.cat(lowerCAmelCase__ , dim=0 ) UpperCAmelCase__ : Tuple = torch.stack(lowerCAmelCase__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) UpperCAmelCase__ : Optional[Any] = metric.compute(predictions=lowerCAmelCase__ , references=lowerCAmelCase__ ) accelerator.print('''Average test metrics from all folds:''' , lowerCAmelCase__ ) def a__ ( ) -> Any: UpperCAmelCase__ : Tuple = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). 
Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) # New Code # parser.add_argument('''--num_folds''' , type=lowerCAmelCase__ , default=3 , help='''The number of splits to perform across the dataset''' ) UpperCAmelCase__ : Tuple = parser.parse_args() UpperCAmelCase__ : Any = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(lowerCAmelCase__ , lowerCAmelCase__ ) if __name__ == "__main__": main()
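The script is meant to be started through the `accelerate` CLI, e.g. `accelerate launch cross_validation.py --num_folds 5` (filename illustrative); from a notebook or plain interpreter the same entry point can be driven with `notebook_launcher`. A sketch with an illustrative process count:

from accelerate import notebook_launcher

# spawns two workers, each running main() with the default CLI arguments
notebook_launcher(main, num_processes=2)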
299
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]

if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    # lazily materialize submodules on first attribute access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
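The effect of `_LazyModule` is that importing the package stays cheap: only the names listed in `_import_structure` are resolvable, and each submodule (and hence `torch`) is imported on first attribute access. A sketch, assuming the package path `transformers.models.deprecated.mmbt` implied by the four-dot relative import:

from transformers.models.deprecated import mmbt

config_cls = mmbt.MMBTConfig  # resolves configuration_mmbt only
model_cls = mmbt.MMBTModel    # first access here triggers the torch-backed submodule import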
299
1
'''simple docstring''' import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class lowerCamelCase_ ( __a ): def __get__( self : str , _A : Tuple , _A : List[str]=None ): '''simple docstring''' if obj is None: return self if self.fget is None: raise AttributeError('''unreadable attribute''' ) UpperCAmelCase__ : Union[str, Any] = '''__cached_''' + self.fget.__name__ UpperCAmelCase__ : Any = getattr(_A , _A , _A ) if cached is None: UpperCAmelCase__ : Dict = self.fget(_A ) setattr(_A , _A , _A ) return cached def a__ ( lowerCAmelCase__ ) -> Optional[int]: UpperCAmelCase__ : Tuple = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(F"""invalid truth value {val!r}""" ) def a__ ( lowerCAmelCase__ ) -> Optional[Any]: if is_torch_fx_proxy(lowerCAmelCase__ ): return True if is_torch_available(): import torch if isinstance(lowerCAmelCase__ , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(lowerCAmelCase__ , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(lowerCAmelCase__ , (jnp.ndarray, Tracer) ): return True return isinstance(lowerCAmelCase__ , np.ndarray ) def a__ ( lowerCAmelCase__ ) -> Any: return isinstance(lowerCAmelCase__ , np.ndarray ) def a__ ( lowerCAmelCase__ ) -> int: return _is_numpy(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> Optional[Any]: import torch return isinstance(lowerCAmelCase__ , torch.Tensor ) def a__ ( lowerCAmelCase__ ) -> List[str]: return False if not is_torch_available() else _is_torch(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> Optional[Any]: import torch return isinstance(lowerCAmelCase__ , torch.device ) def a__ ( lowerCAmelCase__ ) -> List[str]: return False if not is_torch_available() else _is_torch_device(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> Any: import torch if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): if hasattr(lowerCAmelCase__ , lowerCAmelCase__ ): UpperCAmelCase__ : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) else: return False return isinstance(lowerCAmelCase__ , torch.dtype ) def a__ ( lowerCAmelCase__ ) -> Optional[int]: return False if not is_torch_available() else _is_torch_dtype(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> List[Any]: import tensorflow as tf return isinstance(lowerCAmelCase__ , tf.Tensor ) def a__ ( lowerCAmelCase__ ) -> List[str]: return False if not is_tf_available() else _is_tensorflow(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> Any: import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(lowerCAmelCase__ , '''is_symbolic_tensor''' ): return tf.is_symbolic_tensor(lowerCAmelCase__ ) return type(lowerCAmelCase__ ) == tf.Tensor def a__ ( lowerCAmelCase__ ) -> Union[str, Any]: return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> Tuple: import jax.numpy as jnp # noqa: F811 return isinstance(lowerCAmelCase__ , jnp.ndarray ) def a__ ( lowerCAmelCase__ ) -> List[Any]: return False if not 
is_flax_available() else _is_jax(lowerCAmelCase__ ) def a__ ( lowerCAmelCase__ ) -> Tuple: if isinstance(lowerCAmelCase__ , (dict, UserDict) ): return {k: to_py_obj(lowerCAmelCase__ ) for k, v in obj.items()} elif isinstance(lowerCAmelCase__ , (list, tuple) ): return [to_py_obj(lowerCAmelCase__ ) for o in obj] elif is_tf_tensor(lowerCAmelCase__ ): return obj.numpy().tolist() elif is_torch_tensor(lowerCAmelCase__ ): return obj.detach().cpu().tolist() elif is_jax_tensor(lowerCAmelCase__ ): return np.asarray(lowerCAmelCase__ ).tolist() elif isinstance(lowerCAmelCase__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def a__ ( lowerCAmelCase__ ) -> Tuple: if isinstance(lowerCAmelCase__ , (dict, UserDict) ): return {k: to_numpy(lowerCAmelCase__ ) for k, v in obj.items()} elif isinstance(lowerCAmelCase__ , (list, tuple) ): return np.array(lowerCAmelCase__ ) elif is_tf_tensor(lowerCAmelCase__ ): return obj.numpy() elif is_torch_tensor(lowerCAmelCase__ ): return obj.detach().cpu().numpy() elif is_jax_tensor(lowerCAmelCase__ ): return np.asarray(lowerCAmelCase__ ) else: return obj class lowerCamelCase_ ( __a ): def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[str] = fields(self ) # Safety and consistency checks if not len(_A ): raise ValueError(f"""{self.__class__.__name__} has no fields.""" ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" ) UpperCAmelCase__ : Dict = getattr(self , class_fields[0].name ) UpperCAmelCase__ : Any = all(getattr(self , field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(_A ): if isinstance(_A , _A ): UpperCAmelCase__ : List[Any] = first_field.items() UpperCAmelCase__ : Optional[int] = True else: try: UpperCAmelCase__ : Optional[int] = iter(_A ) UpperCAmelCase__ : Optional[int] = True except TypeError: UpperCAmelCase__ : Optional[Any] = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(_A ): if ( not isinstance(_A , (list, tuple) ) or not len(_A ) == 2 or not isinstance(element[0] , _A ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute UpperCAmelCase__ : List[Any] = first_field else: # If we have a mixed iterator, raise an error raise ValueError( f"""Cannot set key/value for {element}. 
It needs to be a tuple (key, value).""" ) break setattr(self , element[0] , element[1] ) if element[1] is not None: UpperCAmelCase__ : List[str] = element[1] elif first_field is not None: UpperCAmelCase__ : Optional[Any] = first_field else: for field in class_fields: UpperCAmelCase__ : Optional[int] = getattr(self , field.name ) if v is not None: UpperCAmelCase__ : str = v def __delitem__( self : Union[str, Any] , *_A : Any , **_A : str ): '''simple docstring''' raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" ) def lowercase_ ( self : Any , *_A : List[str] , **_A : Tuple ): '''simple docstring''' raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" ) def lowercase_ ( self : Optional[Any] , *_A : Any , **_A : Tuple ): '''simple docstring''' raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" ) def lowercase_ ( self : Optional[Any] , *_A : Dict , **_A : List[Any] ): '''simple docstring''' raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" ) def __getitem__( self : List[str] , _A : Any ): '''simple docstring''' if isinstance(_A , _A ): UpperCAmelCase__ : Union[str, Any] = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self : int , _A : Union[str, Any] , _A : str ): '''simple docstring''' if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(_A , _A ) super().__setattr__(_A , _A ) def __setitem__( self : Any , _A : Optional[int] , _A : List[str] ): '''simple docstring''' super().__setitem__(_A , _A ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(_A , _A ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' return tuple(self[k] for k in self.keys() ) class lowerCamelCase_ ( __a , __a ): @classmethod def lowercase_ ( cls : Optional[Any] , _A : Optional[Any] ): '''simple docstring''' raise ValueError( f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" ) class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'longest' lowerCAmelCase__ = 'max_length' lowerCAmelCase__ = 'do_not_pad' class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'pt' lowerCAmelCase__ = 'tf' lowerCAmelCase__ = 'np' lowerCAmelCase__ = 'jax' class lowerCamelCase_ : def __init__( self : List[Any] , _A : List[ContextManager] ): '''simple docstring''' UpperCAmelCase__ : str = context_managers UpperCAmelCase__ : int = ExitStack() def __enter__( self : str ): '''simple docstring''' for context_manager in self.context_managers: self.stack.enter_context(_A ) def __exit__( self : Dict , *_A : List[Any] , **_A : str ): '''simple docstring''' self.stack.__exit__(*_A , **_A ) def a__ ( lowerCAmelCase__ ) -> Any: UpperCAmelCase__ : int = infer_framework(lowerCAmelCase__ ) if framework == "tf": UpperCAmelCase__ : Optional[Any] = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCAmelCase__ : List[Any] = inspect.signature(model_class.forward ) # PyTorch models else: UpperCAmelCase__ : List[Any] = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def a__ ( lowerCAmelCase__ ) -> Optional[int]: UpperCAmelCase__ : Dict = model_class.__name__ UpperCAmelCase__ : Union[str, Any] = infer_framework(lowerCAmelCase__ ) if framework == 
"tf": UpperCAmelCase__ : Tuple = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCAmelCase__ : List[str] = inspect.signature(model_class.forward ) # PyTorch models else: UpperCAmelCase__ : int = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = "" , lowerCAmelCase__ = "." ) -> Any: def _flatten_dict(lowerCAmelCase__ , lowerCAmelCase__="" , lowerCAmelCase__="." ): for k, v in d.items(): UpperCAmelCase__ : int = str(lowerCAmelCase__ ) + delimiter + str(lowerCAmelCase__ ) if parent_key else k if v and isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): yield from flatten_dict(lowerCAmelCase__ , lowerCAmelCase__ , delimiter=lowerCAmelCase__ ).items() else: yield key, v return dict(_flatten_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) ) @contextmanager def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = False ) -> int: if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ) -> Optional[Any]: if is_numpy_array(lowerCAmelCase__ ): return np.transpose(lowerCAmelCase__ , axes=lowerCAmelCase__ ) elif is_torch_tensor(lowerCAmelCase__ ): return array.T if axes is None else array.permute(*lowerCAmelCase__ ) elif is_tf_tensor(lowerCAmelCase__ ): import tensorflow as tf return tf.transpose(lowerCAmelCase__ , perm=lowerCAmelCase__ ) elif is_jax_tensor(lowerCAmelCase__ ): return jnp.transpose(lowerCAmelCase__ , axes=lowerCAmelCase__ ) else: raise ValueError(F"""Type not supported for transpose: {type(lowerCAmelCase__ )}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple: if is_numpy_array(lowerCAmelCase__ ): return np.reshape(lowerCAmelCase__ , lowerCAmelCase__ ) elif is_torch_tensor(lowerCAmelCase__ ): return array.reshape(*lowerCAmelCase__ ) elif is_tf_tensor(lowerCAmelCase__ ): import tensorflow as tf return tf.reshape(lowerCAmelCase__ , lowerCAmelCase__ ) elif is_jax_tensor(lowerCAmelCase__ ): return jnp.reshape(lowerCAmelCase__ , lowerCAmelCase__ ) else: raise ValueError(F"""Type not supported for reshape: {type(lowerCAmelCase__ )}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ) -> List[Any]: if is_numpy_array(lowerCAmelCase__ ): return np.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ ) elif is_torch_tensor(lowerCAmelCase__ ): return array.squeeze() if axis is None else array.squeeze(dim=lowerCAmelCase__ ) elif is_tf_tensor(lowerCAmelCase__ ): import tensorflow as tf return tf.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ ) elif is_jax_tensor(lowerCAmelCase__ ): return jnp.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ ) else: raise ValueError(F"""Type not supported for squeeze: {type(lowerCAmelCase__ )}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]: if is_numpy_array(lowerCAmelCase__ ): return np.expand_dims(lowerCAmelCase__ , lowerCAmelCase__ ) elif is_torch_tensor(lowerCAmelCase__ ): return array.unsqueeze(dim=lowerCAmelCase__ ) elif is_tf_tensor(lowerCAmelCase__ ): import tensorflow as tf return tf.expand_dims(lowerCAmelCase__ , axis=lowerCAmelCase__ ) elif is_jax_tensor(lowerCAmelCase__ ): return jnp.expand_dims(lowerCAmelCase__ , axis=lowerCAmelCase__ ) else: raise ValueError(F"""Type not supported for expand_dims: {type(lowerCAmelCase__ )}.""" ) 
def a__ ( lowerCAmelCase__ ) -> int: if is_numpy_array(lowerCAmelCase__ ): return np.size(lowerCAmelCase__ ) elif is_torch_tensor(lowerCAmelCase__ ): return array.numel() elif is_tf_tensor(lowerCAmelCase__ ): import tensorflow as tf return tf.size(lowerCAmelCase__ ) elif is_jax_tensor(lowerCAmelCase__ ): return array.size else: raise ValueError(F"""Type not supported for expand_dims: {type(lowerCAmelCase__ )}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]: for key, value in auto_map.items(): if isinstance(lowerCAmelCase__ , (tuple, list) ): UpperCAmelCase__ : int = [F"""{repo_id}--{v}""" if (v is not None and '''--''' not in v) else v for v in value] elif value is not None and "--" not in value: UpperCAmelCase__ : str = F"""{repo_id}--{value}""" return auto_map def a__ ( lowerCAmelCase__ ) -> Tuple: for base_class in inspect.getmro(lowerCAmelCase__ ): UpperCAmelCase__ : Optional[int] = base_class.__module__ UpperCAmelCase__ : Optional[int] = base_class.__name__ if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel": return "tf" elif module.startswith('''torch''' ) or name == "PreTrainedModel": return "pt" elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(F"""Could not infer framework from class {model_class}.""" )
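The shape helpers near the end (`transpose`, `reshape`, `squeeze`, `expand_dims`, each mangled to `a__` above) share one contract: detect the tensor type via the `is_*_tensor` predicates, then dispatch to the matching backend call. A standalone numpy-only sketch of that dispatch idea:

import numpy as np


def dispatching_transpose(array, axes=None):
    # minimal single-backend re-creation of the dispatch pattern
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    raise ValueError(f"Type not supported for transpose: {type(array)}.")


x = np.arange(6).reshape(2, 3)
print(dispatching_transpose(x).shape)  # (3, 2)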
299
'''simple docstring''' from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class lowerCamelCase_ : def __init__( self : List[Any] , _A : Union[str, Any] , _A : List[Any]=13 , _A : str=7 , _A : List[str]=True , _A : Optional[Any]=True , _A : str=True , _A : List[Any]=True , _A : Optional[int]=99 , _A : Any=[1, 1, 2] , _A : Any=1 , _A : Union[str, Any]=32 , _A : int=4 , _A : int=8 , _A : List[str]=37 , _A : int="gelu_new" , _A : Optional[int]=0.1 , _A : Any=0.1 , _A : List[str]=0.0 , _A : int=512 , _A : List[str]=3 , _A : Any=0.0_2 , _A : List[str]=3 , _A : int=4 , _A : Union[str, Any]=None , _A : Any=False , ): '''simple docstring''' UpperCAmelCase__ : Tuple = parent UpperCAmelCase__ : Any = batch_size UpperCAmelCase__ : Any = seq_length UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : int = use_input_mask UpperCAmelCase__ : Optional[int] = use_token_type_ids UpperCAmelCase__ : str = use_labels UpperCAmelCase__ : List[Any] = vocab_size UpperCAmelCase__ : Optional[Any] = block_sizes UpperCAmelCase__ : int = num_decoder_layers UpperCAmelCase__ : Optional[int] = d_model UpperCAmelCase__ : int = n_head UpperCAmelCase__ : List[Any] = d_head UpperCAmelCase__ : Any = d_inner UpperCAmelCase__ : Optional[Any] = hidden_act UpperCAmelCase__ : int = hidden_dropout UpperCAmelCase__ : Dict = attention_dropout UpperCAmelCase__ : List[Any] = activation_dropout UpperCAmelCase__ : Optional[int] = max_position_embeddings UpperCAmelCase__ : Any = type_vocab_size UpperCAmelCase__ : int = 2 UpperCAmelCase__ : Dict = num_labels UpperCAmelCase__ : Any = num_choices UpperCAmelCase__ : List[Any] = scope UpperCAmelCase__ : int = initializer_std # Used in the tests to check the size of the first attention layer UpperCAmelCase__ : int = n_head # Used in the tests to check the size of the first hidden state UpperCAmelCase__ : List[Any] = self.d_model # Used in the tests to check the number of output hidden states/attentions UpperCAmelCase__ : Tuple = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: UpperCAmelCase__ : Union[str, Any] = self.num_hidden_layers + 2 def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ : Optional[int] = None if self.use_input_mask: UpperCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : Tuple = None if self.use_token_type_ids: UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase__ : int = None UpperCAmelCase__ : int = None UpperCAmelCase__ : Tuple = None if self.use_labels: UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ : Any = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def lowercase_ ( self : List[str] , _A : List[Any] , _A : str , _A : List[str] , _A : Optional[Any] , _A : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any] , ): '''simple docstring''' UpperCAmelCase__ : Dict = TFFunnelModel(config=_A ) UpperCAmelCase__ : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} UpperCAmelCase__ : str = model(_A ) UpperCAmelCase__ : Any = [input_ids, input_mask] UpperCAmelCase__ : str = model(_A ) UpperCAmelCase__ : List[str] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : int = TFFunnelModel(config=_A ) UpperCAmelCase__ : Optional[Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) UpperCAmelCase__ : str = False UpperCAmelCase__ : Union[str, Any] = TFFunnelModel(config=_A ) UpperCAmelCase__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def lowercase_ ( self : str , _A : Dict , _A : Union[str, Any] , _A : int , _A : List[Any] , _A : str , _A : Dict , _A : str , ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TFFunnelBaseModel(config=_A ) UpperCAmelCase__ : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} UpperCAmelCase__ : int = model(_A ) UpperCAmelCase__ : Optional[int] = [input_ids, input_mask] UpperCAmelCase__ : List[Any] = model(_A ) UpperCAmelCase__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : Any = TFFunnelBaseModel(config=_A ) UpperCAmelCase__ : Optional[Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) 
UpperCAmelCase__ : int = False UpperCAmelCase__ : Dict = TFFunnelBaseModel(config=_A ) UpperCAmelCase__ : Dict = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def lowercase_ ( self : List[str] , _A : Any , _A : List[str] , _A : Union[str, Any] , _A : List[str] , _A : str , _A : int , _A : Optional[int] , ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = TFFunnelForPreTraining(config=_A ) UpperCAmelCase__ : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} UpperCAmelCase__ : int = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def lowercase_ ( self : Optional[Any] , _A : List[str] , _A : str , _A : List[Any] , _A : Tuple , _A : str , _A : Union[str, Any] , _A : Dict , ): '''simple docstring''' UpperCAmelCase__ : Dict = TFFunnelForMaskedLM(config=_A ) UpperCAmelCase__ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} UpperCAmelCase__ : int = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase_ ( self : str , _A : Dict , _A : Any , _A : Dict , _A : Dict , _A : str , _A : int , _A : List[str] , ): '''simple docstring''' UpperCAmelCase__ : str = self.num_labels UpperCAmelCase__ : int = TFFunnelForSequenceClassification(config=_A ) UpperCAmelCase__ : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} UpperCAmelCase__ : Tuple = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase_ ( self : Union[str, Any] , _A : str , _A : Union[str, Any] , _A : str , _A : List[Any] , _A : Optional[Any] , _A : Tuple , _A : Optional[Any] , ): '''simple docstring''' UpperCAmelCase__ : str = self.num_choices UpperCAmelCase__ : Dict = TFFunnelForMultipleChoice(config=_A ) UpperCAmelCase__ : Optional[Any] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase__ : Dict = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase__ : Any = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase__ : int = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } UpperCAmelCase__ : Any = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase_ ( self : Dict , _A : Optional[Any] , _A : Union[str, Any] , _A : Dict , _A : Union[str, Any] , _A : List[str] , _A : List[str] , _A : Dict , ): '''simple docstring''' UpperCAmelCase__ : Dict = self.num_labels UpperCAmelCase__ : Optional[int] = TFFunnelForTokenClassification(config=_A ) UpperCAmelCase__ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} UpperCAmelCase__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase_ ( self : Any , _A : str , _A : int , _A : List[str] , _A : List[str] , _A : List[Any] , _A : Tuple , _A : int , ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = TFFunnelForQuestionAnswering(config=_A ) UpperCAmelCase__ : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} UpperCAmelCase__ : str = 
model(_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase__ : Any = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) lowerCAmelCase__ = ( { 'feature-extraction': (TFFunnelBaseModel, TFFunnelModel), 'fill-mask': TFFunnelForMaskedLM, 'question-answering': TFFunnelForQuestionAnswering, 'text-classification': TFFunnelForSequenceClassification, 'token-classification': TFFunnelForTokenClassification, 'zero-shot': TFFunnelForSequenceClassification, } if is_tf_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = TFFunnelModelTester(self ) UpperCAmelCase__ : Optional[int] = ConfigTester(self , config_class=_A ) def lowercase_ ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) @require_tf class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) lowerCAmelCase__ = False lowerCAmelCase__ = False def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = TFFunnelModelTester(self , base=_A ) UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=_A ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*_A ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def lowercase_ ( self : Any ): '''simple 
docstring''' UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_A )
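# --- Illustrative sketch (pure Python, not part of the test file above): how the
# tester's expected hidden-state count follows from its hyperparameters. The helper
# name `expected_hidden_state_count` is hypothetical; the asserted values mirror the
# defaults above (block_sizes=[1, 1, 2], num_decoder_layers=1).
def expected_hidden_state_count(block_sizes, num_decoder_layers, base):
    # Each encoder layer yields one hidden state; the decoder only exists for the
    # full (non-base) model.
    num_hidden_layers = sum(block_sizes) + (0 if base else num_decoder_layers)
    # The full FunnelModel additionally exposes the input embeddings and the sum of
    # the upsampled encoder state with the first block's last hidden state (+2).
    return num_hidden_layers + (0 if base else 2)

assert expected_hidden_state_count([1, 1, 2], 1, base=True) == 4
assert expected_hidden_state_count([1, 1, 2], 1, base=False) == 7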
299
'''simple docstring''' import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCamelCase__ = 1_6 UpperCamelCase__ = 3_2 def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 16 ) -> Dict: UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''bert-base-cased''' ) UpperCAmelCase__ : str = DatasetDict( { '''train''': dataset['''train'''].select(lowerCAmelCase__ ), '''validation''': dataset['''train'''].select(lowerCAmelCase__ ), '''test''': dataset['''validation'''], } ) def tokenize_function(lowerCAmelCase__ ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase__ : Optional[int] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): UpperCAmelCase__ : Dict = datasets.map( lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase__ : int = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(lowerCAmelCase__ ): # On TPU it's best to pad everything to the same length or training will be very slow. UpperCAmelCase__ : Optional[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": UpperCAmelCase__ : Any = 16 elif accelerator.mixed_precision != "no": UpperCAmelCase__ : Dict = 8 else: UpperCAmelCase__ : List[Any] = None return tokenizer.pad( lowerCAmelCase__ , padding='''longest''' , max_length=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_tensors='''pt''' , ) # Instantiate dataloaders. 
UpperCAmelCase__ : List[Any] = DataLoader( tokenized_datasets['''train'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ ) UpperCAmelCase__ : List[str] = DataLoader( tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ ) UpperCAmelCase__ : List[Any] = DataLoader( tokenized_datasets['''test'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ ) return train_dataloader, eval_dataloader, test_dataloader def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str: # New Code # UpperCAmelCase__ : List[str] = [] # Download the dataset UpperCAmelCase__ : Union[str, Any] = load_dataset('''glue''' , '''mrpc''' ) # Create our splits UpperCAmelCase__ : str = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator UpperCAmelCase__ : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase__ : Any = config['''lr'''] UpperCAmelCase__ : Any = int(config['''num_epochs'''] ) UpperCAmelCase__ : Any = int(config['''seed'''] ) UpperCAmelCase__ : Dict = int(config['''batch_size'''] ) UpperCAmelCase__ : Any = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation UpperCAmelCase__ : Optional[Any] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: UpperCAmelCase__ : Any = batch_size // MAX_GPU_BATCH_SIZE UpperCAmelCase__ : List[Any] = MAX_GPU_BATCH_SIZE set_seed(lowerCAmelCase__ ) # New Code # # Create our folds: UpperCAmelCase__ : Union[str, Any] = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] ) UpperCAmelCase__ : Dict = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(lowerCAmelCase__ ): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = get_fold_dataloaders( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase__ : List[str] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowerCAmelCase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). UpperCAmelCase__ : Optional[Any] = model.to(accelerator.device ) # Instantiate optimizer UpperCAmelCase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=lowerCAmelCase__ ) # Instantiate scheduler UpperCAmelCase__ : Any = get_linear_schedule_with_warmup( optimizer=lowerCAmelCase__ , num_warmup_steps=1_00 , num_training_steps=(len(lowerCAmelCase__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = accelerator.prepare( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Now we train the model for epoch in range(lowerCAmelCase__ ): model.train() for step, batch in enumerate(lowerCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) UpperCAmelCase__ : Union[str, Any] = model(**lowerCAmelCase__ ) UpperCAmelCase__ : Dict = outputs.loss UpperCAmelCase__ : Dict = loss / gradient_accumulation_steps accelerator.backward(lowerCAmelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase__ : str = model(**lowerCAmelCase__ ) UpperCAmelCase__ : Any = outputs.logits.argmax(dim=-1 ) UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , ) UpperCAmelCase__ : str = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , lowerCAmelCase__ ) # New Code # # We also run predictions on the test set at the very end UpperCAmelCase__ : int = [] for step, batch in enumerate(lowerCAmelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase__ : str = model(**lowerCAmelCase__ ) UpperCAmelCase__ : Union[str, Any] = outputs.logits UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(lowerCAmelCase__ , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: UpperCAmelCase__ : Union[str, Any] = torch.cat(lowerCAmelCase__ , dim=0 ) UpperCAmelCase__ : Tuple = torch.stack(lowerCAmelCase__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) UpperCAmelCase__ : Optional[Any] = metric.compute(predictions=lowerCAmelCase__ , references=lowerCAmelCase__ ) accelerator.print('''Average test metrics from all folds:''' , lowerCAmelCase__ ) def a__ ( ) -> Any: UpperCAmelCase__ : Tuple = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). 
Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) # New Code # parser.add_argument('''--num_folds''' , type=lowerCAmelCase__ , default=3 , help='''The number of splits to perform across the dataset''' ) UpperCAmelCase__ : Tuple = parser.parse_args() UpperCAmelCase__ : Any = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(lowerCAmelCase__ , lowerCAmelCase__ ) if __name__ == "__main__": main()
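# --- Illustrative sketch (standalone; assumes numpy and scikit-learn are installed):
# how the fold indices produced by StratifiedKFold map onto the train/validation
# splits that get_fold_dataloaders() builds via `dataset["train"].select(...)`.
# The toy labels are made up for demonstration.
import numpy as np
from sklearn.model_selection import StratifiedKFold

labels = np.array([0, 1] * 6)  # 12 examples with balanced binary labels
kfold = StratifiedKFold(n_splits=3)
for fold, (train_idxs, valid_idxs) in enumerate(kfold.split(np.zeros(len(labels)), labels)):
    # Each fold preserves the class ratio of `labels` in both index arrays; in the
    # training script these arrays are passed straight to `dataset.select(...)`.
    print(f"fold {fold}: train={train_idxs.tolist()} valid={valid_idxs.tolist()}")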
299
1
'''simple docstring''' import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Any = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() UpperCAmelCase__ : Optional[Any] = dict(zip(_A , range(len(_A ) ) ) ) UpperCAmelCase__ : Tuple = { '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } UpperCAmelCase__ : Optional[int] = { '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 16_000, '''return_attention_mask''': False, '''do_normalize''': True, } UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp() UpperCAmelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase__ : int = os.path.join(self.tmpdirname , _A ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_A ) + '''\n''' ) with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_A ) + '''\n''' ) # load decoder from hub UpperCAmelCase__ : Any = '''hf-internal-testing/ngram-beam-search-decoder''' def lowercase_ ( self : int , **_A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = self.add_kwargs_tokens_map.copy() kwargs.update(_A ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_A ) def lowercase_ ( self : str , **_A : Any ): '''simple docstring''' return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_A ) def lowercase_ ( self : str , **_A : Any ): '''simple docstring''' return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_A ) def lowercase_ ( self : Any ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase__ : Any = self.get_feature_extractor() UpperCAmelCase__ : Tuple = self.get_decoder() UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , _A ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) 
self.assertIsInstance(processor.feature_extractor , _A ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , _A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(_A , '''include''' ): WavaVecaProcessorWithLM( tokenizer=_A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_feature_extractor() UpperCAmelCase__ : Optional[int] = self.get_tokenizer() UpperCAmelCase__ : Any = self.get_decoder() UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : List[Any] = floats_list((3, 1_000) ) UpperCAmelCase__ : Dict = feature_extractor(_A , return_tensors='''np''' ) UpperCAmelCase__ : str = processor(_A , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_feature_extractor() UpperCAmelCase__ : str = self.get_tokenizer() UpperCAmelCase__ : str = self.get_decoder() UpperCAmelCase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : Union[str, Any] = '''This is a test string''' UpperCAmelCase__ : Optional[int] = processor(text=_A ) UpperCAmelCase__ : List[str] = tokenizer(_A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase_ ( self : Dict , _A : Optional[int]=(2, 10, 16) , _A : List[str]=77 ): '''simple docstring''' np.random.seed(_A ) return np.random.rand(*_A ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_feature_extractor() UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase__ : Optional[Any] = self.get_decoder() UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : int = self._get_dummy_logits(shape=(10, 16) , seed=13 ) UpperCAmelCase__ : List[Any] = processor.decode(_A ) UpperCAmelCase__ : List[Any] = decoder.decode_beams(_A )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual('''</s> <s> </s>''' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score 
) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def lowercase_ ( self : Any , _A : str ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_feature_extractor() UpperCAmelCase__ : Tuple = self.get_tokenizer() UpperCAmelCase__ : Tuple = self.get_decoder() UpperCAmelCase__ : Any = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : Optional[Any] = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(_A ) else: with get_context(_A ).Pool() as pool: UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(_A , _A ) UpperCAmelCase__ : str = list(_A ) with get_context('''fork''' ).Pool() as p: UpperCAmelCase__ : Dict = decoder.decode_beams_batch(_A , _A ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(_A , decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text ) self.assertListEqual(_A , decoded_processor.logit_score ) self.assertListEqual(_A , decoded_processor.lm_score ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.get_feature_extractor() UpperCAmelCase__ : List[Any] = self.get_tokenizer() UpperCAmelCase__ : int = self.get_decoder() UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : str = self._get_dummy_logits() UpperCAmelCase__ : Optional[int] = 15 UpperCAmelCase__ : Dict = -2_0.0 UpperCAmelCase__ : Optional[Any] = -4.0 UpperCAmelCase__ : Union[str, Any] = processor.batch_decode( _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , ) UpperCAmelCase__ : List[Any] = decoded_processor_out.text UpperCAmelCase__ : List[str] = list(_A ) with get_context('''fork''' ).Pool() as pool: UpperCAmelCase__ : Tuple = decoder.decode_beams_batch( _A , _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , ) UpperCAmelCase__ : Optional[int] = [d[0][0] for d in decoded_decoder_out] UpperCAmelCase__ : Optional[Any] = [d[0][2] for d in decoded_decoder_out] UpperCAmelCase__ : Optional[int] = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(_A , _A ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _A ) self.assertTrue(np.array_equal(_A , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , _A , atol=1e-3 ) ) self.assertTrue(np.array_equal(_A , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , _A , atol=1e-3 ) ) def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.get_feature_extractor() UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : Dict = self.get_decoder() UpperCAmelCase__ : int = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A ) UpperCAmelCase__ : Optional[int] = self._get_dummy_logits() UpperCAmelCase__ : List[str] = 2.0 UpperCAmelCase__ : Union[str, Any] = 5.0 UpperCAmelCase__ : str = -2_0.0 UpperCAmelCase__ : 
Optional[int] = True UpperCAmelCase__ : Union[str, Any] = processor.batch_decode( _A , alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , ) UpperCAmelCase__ : Union[str, Any] = decoded_processor_out.text UpperCAmelCase__ : Tuple = list(_A ) decoder.reset_params( alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , ) with get_context('''fork''' ).Pool() as pool: UpperCAmelCase__ : Optional[Any] = decoder.decode_beams_batch( _A , _A , ) UpperCAmelCase__ : str = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(_A , _A ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _A ) UpperCAmelCase__ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -2_0.0 ) self.assertEqual(lm_model.score_boundary , _A ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Dict = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase__ : Optional[int] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() UpperCAmelCase__ : Dict = os.listdir(_A ) UpperCAmelCase__ : Optional[Any] = ['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(_A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : str = snapshot_download('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Any = WavaVecaProcessorWithLM.from_pretrained(_A ) UpperCAmelCase__ : Optional[int] = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase__ : str = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() UpperCAmelCase__ : List[str] = os.listdir(_A ) UpperCAmelCase__ : Any = os.listdir(_A ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(_A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Dict = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Tuple = floats_list((3, 1_000) ) UpperCAmelCase__ : int = processor_wavaveca(_A , return_tensors='''np''' ) UpperCAmelCase__ : List[str] = processor_auto(_A , return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 ) UpperCAmelCase__ : Tuple = self._get_dummy_logits() UpperCAmelCase__ : List[str] = processor_wavaveca.batch_decode(_A ) UpperCAmelCase__ : int = processor_auto.batch_decode(_A ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = self.get_feature_extractor() UpperCAmelCase__ : int = self.get_tokenizer() UpperCAmelCase__ : Optional[Any] = self.get_decoder() UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , 
decoder=_A ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , ) @staticmethod def lowercase_ ( _A : Dict , _A : str ): '''simple docstring''' UpperCAmelCase__ : int = [d[key] for d in offsets] return retrieved_list def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : str = self._get_dummy_logits()[0] UpperCAmelCase__ : List[str] = processor.decode(_A , output_word_offsets=_A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_A , _A ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase__ : Dict = self._get_dummy_logits() UpperCAmelCase__ : Dict = processor.batch_decode(_A , output_word_offsets=_A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_A , _A ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(_A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def lowercase_ ( self : Optional[Any] ): '''simple docstring''' import torch UpperCAmelCase__ : Any = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_A ) UpperCAmelCase__ : Dict = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16_000 ) ) UpperCAmelCase__ : List[Any] = iter(_A ) UpperCAmelCase__ : Optional[Any] = next(_A ) UpperCAmelCase__ : Any = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) UpperCAmelCase__ : int = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train UpperCAmelCase__ : int = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values with torch.no_grad(): UpperCAmelCase__ : Dict = model(_A ).logits.cpu().numpy() UpperCAmelCase__ : int = processor.decode(logits[0] , output_word_offsets=_A ) UpperCAmelCase__ : Any = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate UpperCAmelCase__ : Any = [ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': 
d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] UpperCAmelCase__ : int = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , _A ) self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , output.text ) # output times UpperCAmelCase__ : List[Any] = torch.tensor(self.get_from_offsets(_A , '''start_time''' ) ) UpperCAmelCase__ : List[str] = torch.tensor(self.get_from_offsets(_A , '''end_time''' ) ) # fmt: off UpperCAmelCase__ : int = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] ) UpperCAmelCase__ : List[str] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] ) # fmt: on self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) ) self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) )
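# --- Illustrative sketch (pure Python): the offset-to-seconds conversion exercised by
# the slow test above. The constants use typical Wav2Vec2 values (ratio 320, 16 kHz);
# the word offsets are made-up sample data.
inputs_to_logits_ratio = 320
sampling_rate = 16_000
time_offset = inputs_to_logits_ratio / sampling_rate  # seconds per logit frame (0.02 s)

word_offsets = [
    {"word": "<s>", "start_offset": 0, "end_offset": 1},
    {"word": "</s>", "start_offset": 4, "end_offset": 5},
]
word_time_stamps = [
    {
        "start_time": d["start_offset"] * time_offset,
        "end_time": d["end_offset"] * time_offset,
        "word": d["word"],
    }
    for d in word_offsets
]
print(word_time_stamps)  # [{'start_time': 0.0, 'end_time': 0.02, 'word': '<s>'}, ...]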
299
1
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
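A minimal usage sketch for the composite config class above; the ViT/GPT-2 pairing is an illustrative assumption, not something this file mandates:

from transformers import GPT2Config, ViTConfig, VisionEncoderDecoderConfig

encoder_config = ViTConfig()
decoder_config = GPT2Config()
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
# the classmethod flips the decoder into cross-attending decoder mode
assert config.decoder.is_decoder and config.decoder.add_cross_attention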
299
'''simple docstring'''
# Logistic Regression from scratch

# In[62]:

# In[63]:

# importing all the required libraries

import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")

    plt.legend()
    plt.show()
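A short sanity-check sketch, assuming the functions defined above: the analytic gradient used in logistic_reg should agree with a central finite-difference estimate of cost_function on a tiny random problem.

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(20, 2))
y = (rng.random(20) > 0.5).astype(float)
theta = rng.normal(size=2)

h = sigmoid_function(np.dot(x, theta))
analytic = np.dot(x.T, h - y) / y.size  # the gradient step from logistic_reg

eps = 1e-6
numeric = np.zeros_like(theta)
for k in range(theta.size):
    d = np.zeros_like(theta)
    d[k] = eps
    j_plus = cost_function(sigmoid_function(np.dot(x, theta + d)), y)
    j_minus = cost_function(sigmoid_function(np.dot(x, theta - d)), y)
    numeric[k] = (j_plus - j_minus) / (2 * eps)  # central difference

assert np.allclose(analytic, numeric, atol=1e-5)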
299
1
'''simple docstring'''
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
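A hedged variant of the same flow with basic error handling; the helper name and the User-Agent value are illustrative assumptions, not part of the script above.

import requests
from bs4 import BeautifulSoup


def fetch_og_image(page_url: str) -> bytes:  # hypothetical helper name
    response = requests.get(page_url, headers={"User-Agent": "Mozilla/5.0"}, timeout=10)
    response.raise_for_status()  # surface HTTP errors instead of writing a broken file
    meta = BeautifulSoup(response.content, "html.parser").find("meta", {"property": "og:image"})
    if meta is None:
        raise ValueError("page has no og:image meta tag")
    image_response = requests.get(meta["content"], timeout=10)
    image_response.raise_for_status()
    return image_response.content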
299
'''simple docstring''' from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'new-model' if is_tf_available(): class lowerCamelCase_ ( __a ): lowerCAmelCase__ = NewModelConfig @require_tf class lowerCamelCase_ ( unittest.TestCase ): @slow def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[str] = '''bert-base-cased''' UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Dict = TFAutoModel.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : str = '''bert-base-cased''' UpperCAmelCase__ : Any = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : List[str] = TFAutoModelForPreTraining.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : int ): '''simple docstring''' for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : str = TFAutoModelForCausalLM.from_pretrained(_A ) UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = TFAutoModelForCausalLM.from_pretrained(_A , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : List[Any] ): '''simple docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[Any] = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : Optional[Any] ): '''simple docstring''' for model_name in 
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_A ) UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_A , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : Optional[int] ): '''simple docstring''' for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(_A ) UpperCAmelCase__ , UpperCAmelCase__ : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(_A , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : Any ): '''simple docstring''' for model_name in ["bert-base-uncased"]: UpperCAmelCase__ : Any = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Any = TFAutoModelForSequenceClassification.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow def lowercase_ ( self : Any ): '''simple docstring''' for model_name in ["bert-base-uncased"]: UpperCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Dict = TFAutoModelForQuestionAnswering.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) @slow @require_tensorflow_probability def lowercase_ ( self : Optional[int] ): '''simple docstring''' for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: UpperCAmelCase__ : List[str] = AutoConfig.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : List[str] = TFAutoModelForTableQuestionAnswering.from_pretrained(_A ) UpperCAmelCase__ , UpperCAmelCase__ : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained( _A , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , _A ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A ) self.assertIsInstance(_A , _A ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=_A ) , 14_410 ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TFAutoModelWithLMHead.from_pretrained(_A ) self.assertIsInstance(_A , _A ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=_A ) , 14_410 ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : int = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Any = copy.deepcopy(model.config ) UpperCAmelCase__ : Tuple = ['''FunnelBaseModel'''] UpperCAmelCase__ : int = TFAutoModel.from_config(_A ) self.assertIsInstance(_A , _A ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_A ) UpperCAmelCase__ : str = TFAutoModel.from_pretrained(_A ) self.assertIsInstance(_A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' try: AutoConfig.register('''new-model''' , _A ) UpperCAmelCase__ : List[Any] = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, 
TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(_A ): auto_class.register(_A , _A ) auto_class.register(_A , _A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A ): auto_class.register(_A , _A ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCAmelCase__ : Tuple = BertModelTester(self ).get_config() UpperCAmelCase__ : str = NewModelConfig(**tiny_config.to_dict() ) UpperCAmelCase__ : str = auto_class.from_config(_A ) self.assertIsInstance(_A , _A ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_A ) UpperCAmelCase__ : str = auto_class.from_pretrained(_A ) self.assertIsInstance(_A , _A ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def lowercase_ ( self : str ): '''simple docstring''' with self.assertRaisesRegex( _A , '''bert-base is not a local folder and is not a valid model identifier''' ): UpperCAmelCase__ : Dict = TFAutoModel.from_pretrained('''bert-base''' ) def lowercase_ ( self : Tuple ): '''simple docstring''' with self.assertRaisesRegex( _A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): UpperCAmelCase__ : int = TFAutoModel.from_pretrained(_A , revision='''aaaaaa''' ) def lowercase_ ( self : Tuple ): '''simple docstring''' with self.assertRaisesRegex( _A , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ): UpperCAmelCase__ : List[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' with self.assertRaisesRegex(_A , '''Use `from_pt=True` to load this model''' ): UpperCAmelCase__ : int = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' ) def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[str] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: UpperCAmelCase__ : Union[str, Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint UpperCAmelCase__ : Optional[Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) with RequestCounter() as counter: UpperCAmelCase__ : List[Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
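A compact sketch of the register/clean-up pattern the custom-model test above exercises; NewModelConfig mirrors the dummy config defined at the top of this file, and TFNewModel is a hypothetical stand-in for the matching dummy model.

from transformers import CONFIG_MAPPING, AutoConfig, TFAutoModel
from transformers.models.auto.modeling_tf_auto import TF_MODEL_MAPPING

AutoConfig.register("new-model", NewModelConfig)
TFAutoModel.register(NewModelConfig, TFNewModel)  # TFNewModel: hypothetical dummy model
try:
    model = TFAutoModel.from_config(NewModelConfig())
finally:
    # clean the global mappings up again, as the test above does
    del CONFIG_MAPPING._extra_content["new-model"]
    if NewModelConfig in TF_MODEL_MAPPING._extra_content:
        del TF_MODEL_MAPPING._extra_content[NewModelConfig]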
299
1
'''simple docstring''' from diffusers.utils.testing_utils import require_onnxruntime @require_onnxruntime class lowerCamelCase_ : pass
299
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class lowerCamelCase_ ( unittest.TestCase ): def __init__( self : List[str] , _A : List[Any] , _A : Union[str, Any]=7 , _A : List[str]=3 , _A : str=30 , _A : Tuple=400 , _A : Optional[int]=True , _A : List[str]=None , _A : int=True , _A : int=[0.5, 0.5, 0.5] , _A : Optional[int]=[0.5, 0.5, 0.5] , _A : List[Any]=True , _A : str=1 / 255 , _A : Tuple=True , ): '''simple docstring''' UpperCAmelCase__ : str = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333} UpperCAmelCase__ : Optional[Any] = parent UpperCAmelCase__ : Optional[Any] = batch_size UpperCAmelCase__ : List[str] = num_channels UpperCAmelCase__ : List[Any] = min_resolution UpperCAmelCase__ : List[str] = max_resolution UpperCAmelCase__ : Tuple = do_resize UpperCAmelCase__ : Union[str, Any] = size UpperCAmelCase__ : Dict = do_normalize UpperCAmelCase__ : Union[str, Any] = image_mean UpperCAmelCase__ : Optional[int] = image_std UpperCAmelCase__ : Dict = do_rescale UpperCAmelCase__ : Union[str, Any] = rescale_factor UpperCAmelCase__ : int = do_pad def lowercase_ ( self : Any ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def lowercase_ ( self : Any , _A : Union[str, Any] , _A : Union[str, Any]=False ): '''simple docstring''' if not batched: UpperCAmelCase__ : Optional[int] = image_inputs[0] if isinstance(_A , Image.Image ): UpperCAmelCase__ , UpperCAmelCase__ : str = image.size else: UpperCAmelCase__ , UpperCAmelCase__ : int = image.shape[1], image.shape[2] if w < h: UpperCAmelCase__ : Optional[Any] = int(self.size['''shortest_edge'''] * h / w ) UpperCAmelCase__ : List[Any] = self.size['''shortest_edge'''] elif w > h: UpperCAmelCase__ : int = self.size['''shortest_edge'''] UpperCAmelCase__ : Dict = int(self.size['''shortest_edge'''] * w / h ) else: UpperCAmelCase__ : List[str] = self.size['''shortest_edge'''] UpperCAmelCase__ : Dict = self.size['''shortest_edge'''] else: UpperCAmelCase__ : int = [] for image in image_inputs: UpperCAmelCase__ , UpperCAmelCase__ : str = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCAmelCase__ : Union[str, Any] = max(_A , key=lambda _A : item[0] )[0] UpperCAmelCase__ : Union[str, Any] = max(_A , key=lambda _A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = DetaImageProcessor if is_vision_available() else None def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = DetaImageProcessingTester(self ) @property def lowercase_ ( self : int ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''image_mean''' ) ) 
self.assertTrue(hasattr(_A , '''image_std''' ) ) self.assertTrue(hasattr(_A , '''do_normalize''' ) ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''do_rescale''' ) ) self.assertTrue(hasattr(_A , '''do_pad''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} ) self.assertEqual(image_processor.do_pad , _A ) def lowercase_ ( self : Dict ): '''simple docstring''' pass def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input UpperCAmelCase__ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ , UpperCAmelCase__ : str = self.image_processor_tester.get_expected_values(_A , batched=_A ) UpperCAmelCase__ : Union[str, Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input UpperCAmelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ : List[str] = image_processing(_A , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input UpperCAmelCase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, 
expected_width) , ) # Test batched UpperCAmelCase__ : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Any = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: UpperCAmelCase__ : str = json.loads(f.read() ) UpperCAmelCase__ : Tuple = {'''image_id''': 39_769, '''annotations''': target} # encode them UpperCAmelCase__ : Optional[int] = DetaImageProcessor() UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , return_tensors='''pt''' ) # verify pixel values UpperCAmelCase__ : Optional[int] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['''pixel_values'''].shape , _A ) UpperCAmelCase__ : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) ) # verify area UpperCAmelCase__ : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) ) # verify boxes UpperCAmelCase__ : int = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A ) UpperCAmelCase__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) ) # verify image_id UpperCAmelCase__ : str = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) ) # verify is_crowd UpperCAmelCase__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) ) # verify class_labels UpperCAmelCase__ : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) ) # verify orig_size UpperCAmelCase__ : int = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) ) # verify size UpperCAmelCase__ : int = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) ) @slow def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: UpperCAmelCase__ : int = json.loads(f.read() ) UpperCAmelCase__ : str = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target} UpperCAmelCase__ : Dict = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them UpperCAmelCase__ : Any = DetaImageProcessor(format='''coco_panoptic''' ) UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='''pt''' ) # verify pixel values UpperCAmelCase__ : str = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['''pixel_values'''].shape , _A ) UpperCAmelCase__ : Union[str, Any] = 
torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) ) # verify area UpperCAmelCase__ : Any = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) ) # verify boxes UpperCAmelCase__ : Dict = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A ) UpperCAmelCase__ : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) ) # verify image_id UpperCAmelCase__ : Optional[int] = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) ) # verify is_crowd UpperCAmelCase__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) ) # verify class_labels UpperCAmelCase__ : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) ) # verify masks UpperCAmelCase__ : Dict = 822_873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _A ) # verify orig_size UpperCAmelCase__ : str = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) ) # verify size UpperCAmelCase__ : Optional[Any] = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
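A condensed usage sketch of the detection path the first slow test exercises; the fixture paths and the expected shape are the ones referenced above.

import json

from PIL import Image
from transformers import DetaImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f:
    target = {"image_id": 39769, "annotations": json.loads(f.read())}

image_processing = DetaImageProcessor()
encoding = image_processing(images=image, annotations=target, return_tensors="pt")
print(encoding["pixel_values"].shape)  # torch.Size([1, 3, 800, 1066]) in the test above
print(encoding["labels"][0].keys())  # boxes, class_labels, area, iscrowd, ...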
299
1
'''simple docstring''' from argparse import ArgumentParser from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name def a__ ( lowerCAmelCase__ ) -> Any: if not path: return "pipe" for ext in PipelineDataFormat.SUPPORTED_FORMATS: if path.endswith(lowerCAmelCase__ ): return ext raise Exception( F"""Unable to determine file format from file extension {path}. """ F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" ) def a__ ( lowerCAmelCase__ ) -> Optional[Any]: UpperCAmelCase__ : Dict = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) UpperCAmelCase__ : List[str] = try_infer_format_from_ext(args.input ) if args.format == '''infer''' else args.format UpperCAmelCase__ : Optional[Any] = PipelineDataFormat.from_str( format=lowerCAmelCase__ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , ) return RunCommand(lowerCAmelCase__ , lowerCAmelCase__ ) class lowerCamelCase_ ( __a ): def __init__( self : List[Any] , _A : Pipeline , _A : PipelineDataFormat ): '''simple docstring''' UpperCAmelCase__ : Tuple = nlp UpperCAmelCase__ : Optional[int] = reader @staticmethod def lowercase_ ( _A : ArgumentParser ): '''simple docstring''' UpperCAmelCase__ : Dict = parser.add_parser('''run''' , help='''Run a pipeline through the CLI''' ) run_parser.add_argument('''--task''' , choices=get_supported_tasks() , help='''Task to run''' ) run_parser.add_argument('''--input''' , type=_A , help='''Path to the file to use for inference''' ) run_parser.add_argument('''--output''' , type=_A , help='''Path to the file that will be used post to write results.''' ) run_parser.add_argument('''--model''' , type=_A , help='''Name or path to the model to instantiate.''' ) run_parser.add_argument('''--config''' , type=_A , help='''Name or path to the model\'s config to instantiate.''' ) run_parser.add_argument( '''--tokenizer''' , type=_A , help='''Name of the tokenizer to use. (default: same as the model name)''' ) run_parser.add_argument( '''--column''' , type=_A , help='''Name of the column to use as input. (For multi columns input as QA use column1,columns2)''' , ) run_parser.add_argument( '''--format''' , type=_A , default='''infer''' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='''Input format to read from''' , ) run_parser.add_argument( '''--device''' , type=_A , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , ) run_parser.add_argument('''--overwrite''' , action='''store_true''' , help='''Allow overwriting the output file.''' ) run_parser.set_defaults(func=_A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self._nlp, [] for entry in self._reader: UpperCAmelCase__ : Dict = nlp(**_A ) if self._reader.is_multi_columns else nlp(_A ) if isinstance(_A , _A ): outputs.append(_A ) else: outputs += output # Saving data if self._nlp.binary_output: UpperCAmelCase__ : Any = self._reader.save_binary(_A ) logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" ) else: self._reader.save(_A )
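An illustrative way to drive the command defined above programmatically; the task, file names, and column are assumptions, and the final call assumes the last method of RunCommand is the processing loop (named run upstream).

from transformers import pipeline
from transformers.pipelines import PipelineDataFormat

nlp = pipeline(task="sentiment-analysis")  # assumed task
reader = PipelineDataFormat.from_str(
    format="csv",
    output_path="predictions.csv",  # assumed file names
    input_path="reviews.csv",
    column="review_text",  # assumed column
    overwrite=True,
)
RunCommand(nlp, reader).run()  # hedged: assumes the final method above is the pipeline loop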
299
'''simple docstring'''
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)

    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0

    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
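A tiny worked example of the first-order quantity calculate_prob prints: for a text whose characters are half 'a' and half 'b', the entropy is exactly one bit.

import math

counts = {"a": 5, "b": 5}
total = sum(counts.values())
h1 = -sum((c / total) * math.log2(c / total) for c in counts.values())
assert h1 == 1.0  # two equally likely symbols carry exactly one bit each
# calculate_prob("ababababab") prints the same first-order figure, rounded.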
299
1
'''simple docstring''' import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) UpperCamelCase__ = { '''sample_size''': 3_2, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': 1_0_0_0, '''block_out_channels''': [3_2, 6_4], '''attention_head_dim''': 8, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } UpperCamelCase__ = { '''sample_size''': 6_4, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 3, '''num_class_embeds''': 1_0_0_0, '''block_out_channels''': [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4], '''attention_head_dim''': 6_4, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } UpperCamelCase__ = { '''sample_size''': 2_5_6, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': None, '''block_out_channels''': [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4], '''attention_head_dim''': 6_4, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''default''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } UpperCamelCase__ = { '''num_train_timesteps''': 4_0, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } UpperCamelCase__ = { '''num_train_timesteps''': 2_0_1, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } UpperCamelCase__ = { '''num_train_timesteps''': 1_5_1, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } def a__ ( lowerCAmelCase__ ) -> Union[str, Any]: if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('''boolean value expected''' ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> Tuple: UpperCAmelCase__ : str = checkpoint[F"""{old_prefix}.in_layers.0.weight"""] UpperCAmelCase__ : Any = checkpoint[F"""{old_prefix}.in_layers.0.bias"""] UpperCAmelCase__ : List[str] = checkpoint[F"""{old_prefix}.in_layers.2.weight"""] UpperCAmelCase__ : Optional[Any] = checkpoint[F"""{old_prefix}.in_layers.2.bias"""] UpperCAmelCase__ : Any = checkpoint[F"""{old_prefix}.emb_layers.1.weight"""] UpperCAmelCase__ : int = checkpoint[F"""{old_prefix}.emb_layers.1.bias"""] UpperCAmelCase__ : Optional[int] = checkpoint[F"""{old_prefix}.out_layers.0.weight"""] UpperCAmelCase__ : str = checkpoint[F"""{old_prefix}.out_layers.0.bias"""] UpperCAmelCase__ : Tuple = checkpoint[F"""{old_prefix}.out_layers.3.weight"""] UpperCAmelCase__ : Dict = checkpoint[F"""{old_prefix}.out_layers.3.bias"""] 
if has_skip: UpperCAmelCase__ : int = checkpoint[F"""{old_prefix}.skip_connection.weight"""] UpperCAmelCase__ : List[Any] = checkpoint[F"""{old_prefix}.skip_connection.bias"""] return new_checkpoint def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None ) -> List[Any]: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = checkpoint[F"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = checkpoint[F"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 ) UpperCAmelCase__ : Optional[Any] = checkpoint[F"""{old_prefix}.norm.weight"""] UpperCAmelCase__ : List[Any] = checkpoint[F"""{old_prefix}.norm.bias"""] UpperCAmelCase__ : Tuple = weight_q.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase__ : str = bias_q.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase__ : List[Any] = weight_k.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase__ : Optional[Any] = bias_k.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase__ : Union[str, Any] = weight_v.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase__ : Tuple = bias_v.squeeze(-1 ).squeeze(-1 ) UpperCAmelCase__ : int = ( checkpoint[F"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 ) ) UpperCAmelCase__ : Any = checkpoint[F"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple: UpperCAmelCase__ : int = torch.load(lowerCAmelCase__ , map_location='''cpu''' ) UpperCAmelCase__ : Tuple = {} UpperCAmelCase__ : List[Any] = checkpoint['''time_embed.0.weight'''] UpperCAmelCase__ : List[str] = checkpoint['''time_embed.0.bias'''] UpperCAmelCase__ : str = checkpoint['''time_embed.2.weight'''] UpperCAmelCase__ : Dict = checkpoint['''time_embed.2.bias'''] if unet_config["num_class_embeds"] is not None: UpperCAmelCase__ : List[Any] = checkpoint['''label_emb.weight'''] UpperCAmelCase__ : str = checkpoint['''input_blocks.0.0.weight'''] UpperCAmelCase__ : Optional[Any] = checkpoint['''input_blocks.0.0.bias'''] UpperCAmelCase__ : Tuple = unet_config['''down_block_types'''] UpperCAmelCase__ : Optional[Any] = unet_config['''layers_per_block'''] UpperCAmelCase__ : List[Any] = unet_config['''attention_head_dim'''] UpperCAmelCase__ : Dict = unet_config['''block_out_channels'''] UpperCAmelCase__ : Optional[Any] = 1 UpperCAmelCase__ : Optional[int] = channels_list[0] for i, layer_type in enumerate(lowerCAmelCase__ ): UpperCAmelCase__ : Dict = channels_list[i] UpperCAmelCase__ : List[str] = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(lowerCAmelCase__ ): UpperCAmelCase__ : Any = F"""down_blocks.{i}.resnets.{j}""" UpperCAmelCase__ : Tuple = F"""input_blocks.{current_layer}.0""" UpperCAmelCase__ : int = True if j == 0 and downsample_block_has_skip else False UpperCAmelCase__ : Union[str, Any] = convert_resnet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , has_skip=lowerCAmelCase__ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(lowerCAmelCase__ ): UpperCAmelCase__ : Union[str, Any] = F"""down_blocks.{i}.resnets.{j}""" UpperCAmelCase__ : Optional[Any] = F"""input_blocks.{current_layer}.0""" UpperCAmelCase__ : Any = True if j == 0 and downsample_block_has_skip else False UpperCAmelCase__ : Optional[int] = convert_resnet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , has_skip=lowerCAmelCase__ ) UpperCAmelCase__ : Any = F"""down_blocks.{i}.attentions.{j}""" UpperCAmelCase__ : Any = 
F"""input_blocks.{current_layer}.1""" UpperCAmelCase__ : Union[str, Any] = convert_attention( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) current_layer += 1 if i != len(lowerCAmelCase__ ) - 1: UpperCAmelCase__ : Tuple = F"""down_blocks.{i}.downsamplers.0""" UpperCAmelCase__ : Dict = F"""input_blocks.{current_layer}.0""" UpperCAmelCase__ : List[Any] = convert_resnet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) current_layer += 1 UpperCAmelCase__ : Any = current_channels # hardcoded the mid-block for now UpperCAmelCase__ : List[str] = '''mid_block.resnets.0''' UpperCAmelCase__ : Tuple = '''middle_block.0''' UpperCAmelCase__ : int = convert_resnet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase__ : Any = '''mid_block.attentions.0''' UpperCAmelCase__ : Optional[int] = '''middle_block.1''' UpperCAmelCase__ : Dict = convert_attention(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase__ : Union[str, Any] = '''mid_block.resnets.1''' UpperCAmelCase__ : List[str] = '''middle_block.2''' UpperCAmelCase__ : List[Any] = convert_resnet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase__ : Tuple = 0 UpperCAmelCase__ : Tuple = unet_config['''up_block_types'''] for i, layer_type in enumerate(lowerCAmelCase__ ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): UpperCAmelCase__ : List[str] = F"""up_blocks.{i}.resnets.{j}""" UpperCAmelCase__ : Dict = F"""output_blocks.{current_layer}.0""" UpperCAmelCase__ : Any = convert_resnet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , has_skip=lowerCAmelCase__ ) current_layer += 1 if i != len(lowerCAmelCase__ ) - 1: UpperCAmelCase__ : str = F"""up_blocks.{i}.upsamplers.0""" UpperCAmelCase__ : Optional[Any] = F"""output_blocks.{current_layer-1}.1""" UpperCAmelCase__ : Tuple = convert_resnet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): UpperCAmelCase__ : Optional[int] = F"""up_blocks.{i}.resnets.{j}""" UpperCAmelCase__ : List[str] = F"""output_blocks.{current_layer}.0""" UpperCAmelCase__ : int = convert_resnet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , has_skip=lowerCAmelCase__ ) UpperCAmelCase__ : Any = F"""up_blocks.{i}.attentions.{j}""" UpperCAmelCase__ : Tuple = F"""output_blocks.{current_layer}.1""" UpperCAmelCase__ : int = convert_attention( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) current_layer += 1 if i != len(lowerCAmelCase__ ) - 1: UpperCAmelCase__ : Optional[Any] = F"""up_blocks.{i}.upsamplers.0""" UpperCAmelCase__ : int = F"""output_blocks.{current_layer-1}.2""" UpperCAmelCase__ : Dict = convert_resnet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase__ : Dict = checkpoint['''out.0.weight'''] UpperCAmelCase__ : Optional[int] = checkpoint['''out.0.bias'''] UpperCAmelCase__ : Tuple = checkpoint['''out.2.weight'''] UpperCAmelCase__ : Union[str, Any] = checkpoint['''out.2.bias'''] return new_checkpoint if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''') parser.add_argument( '''--dump_path''', default=None, type=str, 
required=True, help='''Path to output the converted UNet model.''' ) parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''') UpperCamelCase__ = parser.parse_args() UpperCamelCase__ = strabool(args.class_cond) UpperCamelCase__ = os.path.basename(args.unet_path) print(F"""Checkpoint: {ckpt_name}""") # Get U-Net config if "imagenet64" in ckpt_name: UpperCamelCase__ = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): UpperCamelCase__ = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: UpperCamelCase__ = TEST_UNET_CONFIG else: raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""") if not args.class_cond: UpperCamelCase__ = None UpperCamelCase__ = con_pt_to_diffuser(args.unet_path, unet_config) UpperCamelCase__ = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: UpperCamelCase__ = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: UpperCamelCase__ = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): UpperCamelCase__ = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""") UpperCamelCase__ = CMStochasticIterativeScheduler(**scheduler_config) UpperCamelCase__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
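A hedged usage sketch for the conversion script above; the flags are exactly the ones argparse defines, while the script and checkpoint filenames are assumptions (the script keys its configs off substrings such as "imagenet64", "256", "cd", and "ct").

# Shell invocation (assumed script/checkpoint names):
#   python convert_consistency_to_diffusers.py \
#       --unet_path cd_imagenet64_l2.pt --dump_path ./cd_imagenet64 --class_cond True
# Reloading the converted pipeline afterwards:
from diffusers import ConsistencyModelPipeline

pipe = ConsistencyModelPipeline.from_pretrained("./cd_imagenet64")
image = pipe(batch_size=1, num_inference_steps=1).images[0]  # one-step sampling sketch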
299
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } UpperCamelCase__ = { '''vocab_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json''' }, '''merges_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt''' }, '''tokenizer_config_file''': { '''facebook/blenderbot_small-90M''': ( '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json''' ) }, } UpperCamelCase__ = { '''facebook/blenderbot_small-90M''': 5_1_2, } class lowerCamelCase_ ( __a ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = BlenderbotSmallTokenizer def __init__( self : List[Any] , _A : List[Any]=None , _A : Optional[Any]=None , _A : Optional[int]="<|endoftext|>" , _A : List[str]="<|endoftext|>" , _A : List[str]="<|endoftext|>" , _A : Any=False , _A : Union[str, Any]=True , **_A : Optional[int] , ): '''simple docstring''' super().__init__( ByteLevelBPETokenizer( vocab=_A , merges=_A , add_prefix_space=_A , trim_offsets=_A , ) , bos_token=_A , eos_token=_A , unk_token=_A , **_A , ) UpperCAmelCase__ : List[Any] = add_prefix_space def lowercase_ ( self : str , _A : Any , _A : Any=None ): '''simple docstring''' UpperCAmelCase__ : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowercase_ ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [self.sep_token_id] UpperCAmelCase__ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
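A brief usage sketch of the fast tokenizer registered above, using the checkpoint named in its pretrained maps.

from transformers import BlenderbotSmallTokenizerFast

tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
ids = tokenizer("sample text")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))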
299
1
'''simple docstring'''
from __future__ import annotations

DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
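A small worked check, assuming search as defined above: on an obstacle-free 3x3 grid with unit cost and an all-zero heuristic, the returned path should contain Manhattan-distance-plus-one cells.

free_grid = [[0] * 3 for _ in range(3)]
zero_heuristic = [[0] * 3 for _ in range(3)]
path, _ = search(free_grid, [0, 0], [2, 2], 1, zero_heuristic)
assert path[0] == [0, 0] and path[-1] == [2, 2]
assert len(path) == 5  # |2-0| + |2-0| moves, hence 5 cells including both ends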
299
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = XLMRobertaTokenizer lowerCAmelCase__ = XLMRobertaTokenizerFast lowerCAmelCase__ = True lowerCAmelCase__ = True def lowercase_ ( self : Dict ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase__ : Union[str, Any] = XLMRobertaTokenizer(_A , keep_accents=_A ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = '''<pad>''' UpperCAmelCase__ : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(_A ) , 1_002 ) def lowercase_ ( self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_002 ) def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = XLMRobertaTokenizer(_A , keep_accents=_A ) UpperCAmelCase__ : int = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) UpperCAmelCase__ : Dict = tokenizer.convert_tokens_to_ids(_A ) self.assertListEqual( _A , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) UpperCAmelCase__ : Optional[int] = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def lowercase_ ( self : str ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return UpperCAmelCase__ : List[str] = (self.rust_tokenizer_class, 
'''hf-internal-testing/tiny-xlm-roberta''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A ) UpperCAmelCase__ : Optional[int] = self.tokenizer_class.from_pretrained(_A , **_A ) UpperCAmelCase__ : List[str] = tempfile.mkdtemp() UpperCAmelCase__ : Any = tokenizer_r.save_pretrained(_A ) UpperCAmelCase__ : Tuple = tokenizer_p.save_pretrained(_A ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) UpperCAmelCase__ : Optional[int] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(_A , _A ) # Checks everything loads correctly in the same way UpperCAmelCase__ : Any = tokenizer_r.from_pretrained(_A ) UpperCAmelCase__ : Dict = tokenizer_p.from_pretrained(_A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_A , _A ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(_A ) # Save tokenizer rust, legacy_format=True UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp() UpperCAmelCase__ : Union[str, Any] = tokenizer_r.save_pretrained(_A , legacy_format=_A ) UpperCAmelCase__ : List[str] = tokenizer_p.save_pretrained(_A ) # Checks it save with the same files self.assertSequenceEqual(_A , _A ) # Checks everything loads correctly in the same way UpperCAmelCase__ : List[str] = tokenizer_r.from_pretrained(_A ) UpperCAmelCase__ : List[str] = tokenizer_p.from_pretrained(_A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_A , _A ) ) shutil.rmtree(_A ) # Save tokenizer rust, legacy_format=False UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp() UpperCAmelCase__ : Dict = tokenizer_r.save_pretrained(_A , legacy_format=_A ) UpperCAmelCase__ : str = tokenizer_p.save_pretrained(_A ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way UpperCAmelCase__ : Union[str, Any] = tokenizer_r.from_pretrained(_A ) UpperCAmelCase__ : Optional[Any] = tokenizer_p.from_pretrained(_A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_A , _A ) ) shutil.rmtree(_A ) @cached_property def lowercase_ ( self : Optional[Any] ): '''simple docstring''' return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' ) def lowercase_ ( self : Any ): '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(_A , f.name ) UpperCAmelCase__ : int = XLMRobertaTokenizer(f.name , keep_accents=_A ) UpperCAmelCase__ : str = pickle.dumps(_A ) pickle.loads(_A ) def lowercase_ ( self : int ): '''simple docstring''' if not self.test_rust_tokenizer: return UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : Union[str, Any] = self.get_rust_tokenizer() UpperCAmelCase__ : Dict = '''I was born in 92000, and this is falsé.''' UpperCAmelCase__ : Dict = tokenizer.tokenize(_A ) UpperCAmelCase__ : List[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) UpperCAmelCase__ : int = 
tokenizer.encode(_A , add_special_tokens=_A ) UpperCAmelCase__ : Optional[Any] = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) UpperCAmelCase__ : Any = self.get_rust_tokenizer() UpperCAmelCase__ : List[Any] = tokenizer.encode(_A ) UpperCAmelCase__ : Union[str, Any] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) @slow def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : str = '''Hello World!''' UpperCAmelCase__ : Tuple = [0, 35_378, 6_661, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(_A , self.big_tokenizer.encode(_A ) ) @slow def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) UpperCAmelCase__ : Any = [ 0, 3_293, 83, 10, 4_552, 4_989, 7_986, 678, 10, 5_915, 111, 179_459, 124_850, 4, 6_044, 237, 12, 6, 5, 6, 4, 6_780, 705, 15, 1_388, 44, 378, 10_114, 711, 152, 20, 6, 5, 22_376, 642, 1_221, 15_190, 34_153, 450, 5_608, 959, 1_119, 57_702, 136, 186, 47, 1_098, 29_367, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6_044, 237, 6_284, 50_901, 528, 31, 90, 34, 927, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(_A , self.big_tokenizer.encode(_A ) ) @slow def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : int = {'''input_ids''': [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_A , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
299
1
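The record above exercises slow/fast tokenizer parity for XLM-RoBERTa. A minimal sketch of the same check, assuming `transformers` is installed and the public `xlm-roberta-base` checkpoint (named inside the record itself) can be downloaded:

from transformers import XLMRobertaTokenizer, XLMRobertaTokenizerFast

# Both tokenizers are built from the same SentencePiece model, so their
# outputs should agree token-for-token and id-for-id.
slow = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
fast = XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base")

text = "I was born in 92000, and this is falsé."
assert slow.tokenize(text) == fast.tokenize(text)
assert slow.encode(text, add_special_tokens=False) == fast.encode(text, add_special_tokens=False)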
'''simple docstring'''
import math


def a__ ( ) -> None:
    UpperCAmelCase__ : Tuple = input('''Enter message: ''' )
    UpperCAmelCase__ : Tuple = int(input(F"""Enter key [2-{len(lowerCAmelCase__ ) - 1}]: """ ) )
    UpperCAmelCase__ : str = input('''Encryption/Decryption [e/d]: ''' )
    if mode.lower().startswith('''e''' ):
        UpperCAmelCase__ : List[Any] = encrypt_message(lowerCAmelCase__ , lowerCAmelCase__ )
    elif mode.lower().startswith('''d''' ):
        UpperCAmelCase__ : Union[str, Any] = decrypt_message(lowerCAmelCase__ , lowerCAmelCase__ )
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(F"""Output:\n{text + "|"}""" )


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
    UpperCAmelCase__ : Dict = [''''''] * key
    for col in range(lowerCAmelCase__ ):
        UpperCAmelCase__ : Union[str, Any] = col
        while pointer < len(lowerCAmelCase__ ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(lowerCAmelCase__ )


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
    UpperCAmelCase__ : List[str] = math.ceil(len(lowerCAmelCase__ ) / key )
    UpperCAmelCase__ : List[str] = key
    UpperCAmelCase__ : Tuple = (num_cols * num_rows) - len(lowerCAmelCase__ )
    UpperCAmelCase__ : Any = [''''''] * num_cols
    UpperCAmelCase__ : Tuple = 0
    UpperCAmelCase__ : Any = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            UpperCAmelCase__ : List[str] = 0
            row += 1
    return "".join(lowerCAmelCase__ )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
299
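Because the style-mangling renames every function in the record to a__, the cipher above is not runnable as written. A self-contained sketch of the same columnar transposition round-trip, with readable names chosen here purely for illustration:

import math

def encrypt_message(key: int, message: str) -> str:
    # Column c collects every key-th character, starting at offset c.
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)

def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        # Wrap to the next row, skipping the shaded boxes in the last column.
        if col == num_cols or (col == num_cols - 1 and row >= num_rows - num_shaded_boxes):
            col = 0
            row += 1
    return "".join(plain_text)

msg = "Common sense is not so common."
assert decrypt_message(6, encrypt_message(6, msg)) == msg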
'''simple docstring'''
from __future__ import annotations

import math


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if not scores:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1 , node_index * 2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) ,
            minimax(depth + 1 , node_index * 2 + 1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) ,
        )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) ,
            minimax(depth + 1 , node_index * 2 + 1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) ,
        )
    )


def a__ ( ) -> None:
    UpperCAmelCase__ : Union[str, Any] = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
    UpperCAmelCase__ : Optional[Any] = math.log(len(lowerCAmelCase__ ) , 2 )
    print(F"""Optimal value : {minimax(0 , 0 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )}""" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
299
1
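A short usage sketch of the same minimax recursion over a complete binary tree of leaf scores; the names and the explicit is_max flip are illustrative, since the record's functions are all mangled to a__:

import math

def minimax(depth: int, node_index: int, is_max: bool, scores: list, height: float) -> int:
    # Leaves hold the scores; interior levels alternate between max and min.
    if depth == height:
        return scores[node_index]
    left = minimax(depth + 1, node_index * 2, not is_max, scores, height)
    right = minimax(depth + 1, node_index * 2 + 1, not is_max, scores, height)
    return max(left, right) if is_max else min(left, right)

scores = [90, 23, 6, 33, 21, 65, 123, 34423]
height = math.log(len(scores), 2)  # 3 levels above the leaves
print(minimax(0, 0, True, scores, height))  # Optimal value: 65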
'''simple docstring''' from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 42 @flax_register_to_config class lowerCamelCase_ ( nn.Module , __a , __a ): lowerCAmelCase__ = 3_2 lowerCAmelCase__ = 4 lowerCAmelCase__ = 4 lowerCAmelCase__ = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) lowerCAmelCase__ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") lowerCAmelCase__ = False lowerCAmelCase__ = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) lowerCAmelCase__ = 2 lowerCAmelCase__ = 8 lowerCAmelCase__ = None lowerCAmelCase__ = 1_2_8_0 lowerCAmelCase__ = 0.0 lowerCAmelCase__ = False lowerCAmelCase__ = jnp.floataa lowerCAmelCase__ = True lowerCAmelCase__ = 0 lowerCAmelCase__ = False def lowercase_ ( self : Optional[Any] , _A : jax.random.KeyArray ): '''simple docstring''' UpperCAmelCase__ : Dict = (1, self.in_channels, self.sample_size, self.sample_size) UpperCAmelCase__ : Optional[Any] = jnp.zeros(_A , dtype=jnp.floataa ) UpperCAmelCase__ : List[Any] = jnp.ones((1,) , dtype=jnp.intaa ) UpperCAmelCase__ : Dict = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) UpperCAmelCase__ , UpperCAmelCase__ : Dict = jax.random.split(_A ) UpperCAmelCase__ : Optional[int] = {'''params''': params_rng, '''dropout''': dropout_rng} return self.init(_A , _A , _A , _A )["params"] def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.block_out_channels UpperCAmelCase__ : Any = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( '''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
UpperCAmelCase__ : Any = self.num_attention_heads or self.attention_head_dim # input UpperCAmelCase__ : int = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time UpperCAmelCase__ : int = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) UpperCAmelCase__ : Optional[int] = FlaxTimestepEmbedding(_A , dtype=self.dtype ) UpperCAmelCase__ : Dict = self.only_cross_attention if isinstance(_A , _A ): UpperCAmelCase__ : Any = (only_cross_attention,) * len(self.down_block_types ) if isinstance(_A , _A ): UpperCAmelCase__ : Any = (num_attention_heads,) * len(self.down_block_types ) # down UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : Union[str, Any] = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): UpperCAmelCase__ : Dict = output_channel UpperCAmelCase__ : Union[str, Any] = block_out_channels[i] UpperCAmelCase__ : Union[str, Any] = i == len(_A ) - 1 if down_block_type == "CrossAttnDownBlock2D": UpperCAmelCase__ : Tuple = FlaxCrossAttnDownBlockaD( in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: UpperCAmelCase__ : str = FlaxDownBlockaD( in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(_A ) UpperCAmelCase__ : Union[str, Any] = down_blocks # mid UpperCAmelCase__ : Optional[Any] = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up UpperCAmelCase__ : Union[str, Any] = [] UpperCAmelCase__ : List[str] = list(reversed(_A ) ) UpperCAmelCase__ : Union[str, Any] = list(reversed(_A ) ) UpperCAmelCase__ : int = list(reversed(_A ) ) UpperCAmelCase__ : Dict = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): UpperCAmelCase__ : Tuple = output_channel UpperCAmelCase__ : Any = reversed_block_out_channels[i] UpperCAmelCase__ : int = reversed_block_out_channels[min(i + 1 , len(_A ) - 1 )] UpperCAmelCase__ : Tuple = i == len(_A ) - 1 if up_block_type == "CrossAttnUpBlock2D": UpperCAmelCase__ : Union[str, Any] = FlaxCrossAttnUpBlockaD( in_channels=_A , out_channels=_A , prev_output_channel=_A , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: UpperCAmelCase__ : Any = FlaxUpBlockaD( in_channels=_A , out_channels=_A , prev_output_channel=_A , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(_A ) UpperCAmelCase__ : List[Any] = output_channel UpperCAmelCase__ : Tuple = up_blocks # out UpperCAmelCase__ : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) 
UpperCAmelCase__ : Optional[Any] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : str , _A : Optional[int] , _A : List[str] , _A : Optional[Any] , _A : Union[str, Any]=None , _A : List[Any]=None , _A : bool = True , _A : bool = False , ): '''simple docstring''' if not isinstance(_A , jnp.ndarray ): UpperCAmelCase__ : Dict = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(_A , jnp.ndarray ) and len(timesteps.shape ) == 0: UpperCAmelCase__ : str = timesteps.astype(dtype=jnp.floataa ) UpperCAmelCase__ : int = jnp.expand_dims(_A , 0 ) UpperCAmelCase__ : Optional[int] = self.time_proj(_A ) UpperCAmelCase__ : List[Any] = self.time_embedding(_A ) # 2. pre-process UpperCAmelCase__ : List[str] = jnp.transpose(_A , (0, 2, 3, 1) ) UpperCAmelCase__ : Optional[Any] = self.conv_in(_A ) # 3. down UpperCAmelCase__ : int = (sample,) for down_block in self.down_blocks: if isinstance(_A , _A ): UpperCAmelCase__ , UpperCAmelCase__ : str = down_block(_A , _A , _A , deterministic=not train ) else: UpperCAmelCase__ , UpperCAmelCase__ : List[str] = down_block(_A , _A , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: UpperCAmelCase__ : str = () for down_block_res_sample, down_block_additional_residual in zip( _A , _A ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) UpperCAmelCase__ : Union[str, Any] = new_down_block_res_samples # 4. mid UpperCAmelCase__ : List[Any] = self.mid_block(_A , _A , _A , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: UpperCAmelCase__ : Dict = down_block_res_samples[-(self.layers_per_block + 1) :] UpperCAmelCase__ : Tuple = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(_A , _A ): UpperCAmelCase__ : Union[str, Any] = up_block( _A , temb=_A , encoder_hidden_states=_A , res_hidden_states_tuple=_A , deterministic=not train , ) else: UpperCAmelCase__ : Union[str, Any] = up_block(_A , temb=_A , res_hidden_states_tuple=_A , deterministic=not train ) # 6. post-process UpperCAmelCase__ : Any = self.conv_norm_out(_A ) UpperCAmelCase__ : str = nn.silu(_A ) UpperCAmelCase__ : Any = self.conv_out(_A ) UpperCAmelCase__ : str = jnp.transpose(_A , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=_A )
299
'''simple docstring'''


class lowerCamelCase_ :
    def __init__( self : Union[str, Any] , _A : int ):
        '''simple docstring'''
        UpperCAmelCase__ : str = n
        UpperCAmelCase__ : Union[str, Any] = [None] * self.n
        UpperCAmelCase__ : Tuple = 0  # index of the first element
        UpperCAmelCase__ : int = 0
        UpperCAmelCase__ : int = 0

    def __len__( self : Optional[Any] ):
        '''simple docstring'''
        return self.size

    def lowercase_ ( self : Dict ):
        '''simple docstring'''
        return self.size == 0

    def lowercase_ ( self : List[str] ):
        '''simple docstring'''
        return False if self.is_empty() else self.array[self.front]

    def lowercase_ ( self : List[Any] , _A : int ):
        '''simple docstring'''
        if self.size >= self.n:
            raise Exception('''QUEUE IS FULL''' )
        UpperCAmelCase__ : str = data
        UpperCAmelCase__ : Optional[Any] = (self.rear + 1) % self.n
        self.size += 1
        return self

    def lowercase_ ( self : List[Any] ):
        '''simple docstring'''
        if self.size == 0:
            raise Exception('''UNDERFLOW''' )
        UpperCAmelCase__ : Any = self.array[self.front]
        UpperCAmelCase__ : List[Any] = None
        UpperCAmelCase__ : Tuple = (self.front + 1) % self.n
        self.size -= 1
        return temp
299
1
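For reference, a runnable rendering of the same fixed-capacity circular queue, with readable method names standing in for the mangled lowercase_ definitions, plus a wrap-around check:

class CircularQueue:
    """Fixed-capacity FIFO backed by a list; front and rear wrap modulo n."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * n
        self.front = 0  # index of the first element
        self.rear = 0   # index one past the last element
        self.size = 0

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp

q = CircularQueue(3).enqueue(1).enqueue(2).enqueue(3)
assert q.dequeue() == 1 and q.dequeue() == 2
q.enqueue(4)  # rear wraps around into a freed slot
assert [q.dequeue(), q.dequeue()] == [3, 4]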
'''simple docstring''' import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(__a ) , 'Tatoeba directory does not exist.' ) class lowerCamelCase_ ( unittest.TestCase ): @cached_property def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Tuple = tempfile.mkdtemp() return TatoebaConverter(save_dir=_A ) @slow def lowercase_ ( self : Optional[int] ): '''simple docstring''' self.resolver.convert_models(['''heb-eng'''] ) @slow def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=_A ) assert mmeta["long_pair"] == "heb-eng"
299
'''simple docstring'''


def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
    UpperCAmelCase__ : Optional[Any] = len(lowerCAmelCase__ )
    for i in range(length - 1 ):
        UpperCAmelCase__ : Optional[Any] = i
        for k in range(i + 1 , lowerCAmelCase__ ):
            if collection[k] < collection[least]:
                UpperCAmelCase__ : Dict = k
        if least != i:
            UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    UpperCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
    UpperCamelCase__ = [int(item) for item in user_input.split(''',''')]
    print(selection_sort(unsorted))
299
1
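A quick check of the selection sort logic above, rewritten with readable names (the record's own entry point reads input interactively):

def selection_sort(collection: list) -> list:
    # In-place: swap the minimum of the unsorted suffix into position i.
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[i], collection[least] = collection[least], collection[i]
    return collection

assert selection_sort([5, 2, 9, 1]) == [1, 2, 5, 9]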
'''simple docstring'''
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def a__ ( lowerCAmelCase__ ) -> str:
    return "".join(sorted(lowerCAmelCase__ ) )


def a__ ( lowerCAmelCase__ ) -> list[str]:
    return word_by_signature[signature(lowerCAmelCase__ )]


UpperCamelCase__ = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
UpperCamelCase__ = sorted({word.strip().lower() for word in data.splitlines()})
UpperCamelCase__ = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    UpperCamelCase__ = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open('''anagrams.txt''', '''w''') as file:
        file.write('''all_anagrams = \n ''')
        file.write(pprint.pformat(all_anagrams))
299
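The record keys each word by its sorted-letter signature, so all anagrams collide on the same key. A minimal sketch of that grouping that drops the words.txt fixture:

import collections

def signature(word: str) -> str:
    # Anagrams share the same multiset of letters, hence the same sorted string.
    return "".join(sorted(word))

word_by_signature = collections.defaultdict(list)
for word in ["listen", "silent", "enlist", "google"]:
    word_by_signature[signature(word)].append(word)

assert word_by_signature["eilnst"] == ["listen", "silent", "enlist"]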
'''simple docstring''' from collections.abc import Iterable from typing import Any class lowerCamelCase_ : def __init__( self : List[Any] , _A : int | None = None ): '''simple docstring''' UpperCAmelCase__ : List[Any] = value UpperCAmelCase__ : Node | None = None # Added in order to delete a node easier UpperCAmelCase__ : Node | None = None UpperCAmelCase__ : Node | None = None def __repr__( self : Optional[Any] ): '''simple docstring''' from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 ) class lowerCamelCase_ : def __init__( self : Optional[Any] , _A : Node | None = None ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = root def __str__( self : Union[str, Any] ): '''simple docstring''' return str(self.root ) def lowercase_ ( self : str , _A : Node , _A : Node | None ): '''simple docstring''' if new_children is not None: # reset its kids UpperCAmelCase__ : Dict = node.parent if node.parent is not None: # reset its parent if self.is_right(_A ): # If it is the right children UpperCAmelCase__ : str = new_children else: UpperCAmelCase__ : Optional[int] = new_children else: UpperCAmelCase__ : Union[str, Any] = new_children def lowercase_ ( self : Union[str, Any] , _A : Node ): '''simple docstring''' if node.parent and node.parent.right: return node == node.parent.right return False def lowercase_ ( self : int ): '''simple docstring''' return self.root is None def lowercase_ ( self : List[str] , _A : Any ): '''simple docstring''' UpperCAmelCase__ : Dict = Node(_A ) # create a new Node if self.empty(): # if Tree is empty UpperCAmelCase__ : List[Any] = new_node # set its root else: # Tree is not empty UpperCAmelCase__ : str = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: UpperCAmelCase__ : Optional[Any] = new_node # We insert the new node in a leaf break else: UpperCAmelCase__ : Any = parent_node.left else: if parent_node.right is None: UpperCAmelCase__ : str = new_node break else: UpperCAmelCase__ : List[str] = parent_node.right UpperCAmelCase__ : Tuple = parent_node def lowercase_ ( self : Optional[Any] , *_A : Tuple ): '''simple docstring''' for value in values: self.__insert(_A ) def lowercase_ ( self : Union[str, Any] , _A : int ): '''simple docstring''' if self.empty(): raise IndexError('''Warning: Tree is empty! 
please use another.''' ) else: UpperCAmelCase__ : List[Any] = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: UpperCAmelCase__ : str = node.left if value < node.value else node.right return node def lowercase_ ( self : List[Any] , _A : Node | None = None ): '''simple docstring''' if node is None: if self.root is None: return None UpperCAmelCase__ : int = self.root if not self.empty(): while node.right is not None: UpperCAmelCase__ : Tuple = node.right return node def lowercase_ ( self : List[Any] , _A : Node | None = None ): '''simple docstring''' if node is None: UpperCAmelCase__ : Optional[int] = self.root if self.root is None: return None if not self.empty(): UpperCAmelCase__ : Optional[int] = self.root while node.left is not None: UpperCAmelCase__ : Tuple = node.left return node def lowercase_ ( self : List[Any] , _A : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.search(_A ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(_A , _A ) elif node.left is None: # Has only right children self.__reassign_nodes(_A , node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(_A , node.left ) else: UpperCAmelCase__ : Union[str, Any] = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore UpperCAmelCase__ : Optional[Any] = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def lowercase_ ( self : List[str] , _A : Node | None ): '''simple docstring''' if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def lowercase_ ( self : str , _A : Any=None ): '''simple docstring''' if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def lowercase_ ( self : Dict , _A : list , _A : Node | None ): '''simple docstring''' if node: self.inorder(_A , node.left ) arr.append(node.value ) self.inorder(_A , node.right ) def lowercase_ ( self : Optional[Any] , _A : int , _A : Node ): '''simple docstring''' UpperCAmelCase__ : list[int] = [] self.inorder(_A , _A ) # append all values to list using inorder traversal return arr[k - 1] def a__ ( lowerCAmelCase__ ) -> list[Node]: UpperCAmelCase__ : Union[str, Any] = [] if curr_node is not None: UpperCAmelCase__ : str = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def a__ ( ) -> None: UpperCAmelCase__ : List[Any] = (8, 3, 6, 1, 10, 14, 13, 4, 7) UpperCAmelCase__ : str = BinarySearchTree() for i in testlist: t.insert(lowerCAmelCase__ ) # Prints all the elements of the list in order traversal print(lowerCAmelCase__ ) if t.search(6 ) is not None: print('''The value 6 exists''' ) else: print('''The value 6 doesn\'t exist''' ) if t.search(-1 ) is not None: print('''The value -1 exists''' ) else: print('''The value -1 doesn\'t exist''' ) if not t.empty(): print('''Max Value: ''' , t.get_max().value ) # type: ignore print('''Min Value: ''' , t.get_min().value ) # type: ignore for i in testlist: t.remove(lowerCAmelCase__ ) print(lowerCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
299
1
'''simple docstring'''
UpperCamelCase__ = 8.314_462  # Unit - J mol-1 K-1


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''' )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''' )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
299
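Both mangled functions above rearrange the ideal gas law PV = nRT, one solving for pressure and one for volume. A hedged numeric round-trip with illustrative names:

UNIVERSAL_GAS_CONSTANT = 8.314462  # J mol^-1 K^-1

def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    # P = nRT / V
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume

def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    # V = nRT / P
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure

p = pressure_of_gas_system(2, 100, 5)  # ~332.58 Pa
assert abs(volume_of_gas_system(2, 100, p) - 5) < 1e-9  # inverse operations agree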
'''simple docstring''' import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger('''transformers.models.speecht5''') UpperCamelCase__ = { '''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''', '''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''', '''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''', '''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''', } UpperCamelCase__ = { '''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''', '''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''', } UpperCamelCase__ = { '''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''', '''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''', '''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''', '''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''', '''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''', } UpperCamelCase__ = { '''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''', '''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''', '''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''', '''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''', '''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''', '''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''', '''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''', '''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''', '''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''', '''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''', '''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''', '''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''', } UpperCamelCase__ = { '''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''', } UpperCamelCase__ = { '''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''', } UpperCamelCase__ = { '''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''', '''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''', '''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''', '''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''', '''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''', '''encoder.layers.*.fc1''': 
'''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''', '''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''', '''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''', '''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''', } UpperCamelCase__ = { '''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''', '''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''', '''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''', '''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''', '''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''', '''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''', '''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''', '''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''', '''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''', '''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''', '''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''', '''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''', '''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''', } UpperCamelCase__ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } UpperCamelCase__ = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCamelCase__ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCamelCase__ = [] UpperCamelCase__ = [ '''encoder.version''', '''encoder.layers.*.norm_k.weight''', '''encoder.layers.*.norm_k.bias''', '''decoder.version''', '''decoder.layers.*.norm_k.weight''', '''decoder.layers.*.norm_k.bias''', '''decoder.pos_emb.pe_k''', '''speech_encoder_prenet.embed_positions._float_tensor''', '''text_decoder_prenet.embed_positions._float_tensor''', ] UpperCamelCase__ = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''speech_decoder_prenet.*''', '''speech_decoder_postnet.*''', ] UpperCamelCase__ = IGNORE_KEYS + [ '''encoder.proj''', '''speech_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] UpperCamelCase__ = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: for attribute in key.split('''.''' ): UpperCAmelCase__ : Optional[int] = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) if weight_type is not None: UpperCAmelCase__ : List[str] = getattr(lowerCAmelCase__ , 
lowerCAmelCase__ ).shape else: UpperCAmelCase__ : Any = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCAmelCase__ : Union[str, Any] = value elif weight_type == "weight_g": UpperCAmelCase__ : Tuple = value elif weight_type == "weight_v": UpperCAmelCase__ : List[Any] = value elif weight_type == "bias": UpperCAmelCase__ : int = value elif weight_type == "running_mean": UpperCAmelCase__ : int = value elif weight_type == "running_var": UpperCAmelCase__ : Union[str, Any] = value elif weight_type == "num_batches_tracked": UpperCAmelCase__ : List[Any] = value else: UpperCAmelCase__ : Union[str, Any] = value logger.info(F"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]: for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." in key: UpperCAmelCase__ , UpperCAmelCase__ : int = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]: UpperCAmelCase__ : int = [] if task == "s2t": UpperCAmelCase__ : Optional[Any] = hf_model.speechta.encoder.prenet.feature_encoder UpperCAmelCase__ : List[Any] = MAPPING_S2T UpperCAmelCase__ : int = IGNORE_KEYS_S2T elif task == "t2s": UpperCAmelCase__ : List[str] = None UpperCAmelCase__ : Tuple = MAPPING_T2S UpperCAmelCase__ : Union[str, Any] = IGNORE_KEYS_T2S elif task == "s2s": UpperCAmelCase__ : Optional[int] = hf_model.speechta.encoder.prenet.feature_encoder UpperCAmelCase__ : Tuple = MAPPING_S2S UpperCAmelCase__ : int = IGNORE_KEYS_S2S else: raise ValueError(F"""Unsupported task: {task}""" ) for name, value in fairseq_dict.items(): if should_ignore(lowerCAmelCase__ , lowerCAmelCase__ ): logger.info(F"""{name} was ignored""" ) continue UpperCAmelCase__ : List[Any] = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , ) UpperCAmelCase__ : Tuple = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." 
+ mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = key.split('''.*.''' ) if prefix in name and suffix in name: UpperCAmelCase__ : List[str] = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: UpperCAmelCase__ : Optional[int] = True if "*" in mapped_key: UpperCAmelCase__ : Any = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2] UpperCAmelCase__ : Union[str, Any] = mapped_key.replace('''*''' , lowerCAmelCase__ ) if "weight_g" in name: UpperCAmelCase__ : Dict = '''weight_g''' elif "weight_v" in name: UpperCAmelCase__ : Union[str, Any] = '''weight_v''' elif "bias" in name: UpperCAmelCase__ : Optional[int] = '''bias''' elif "weight" in name: UpperCAmelCase__ : Optional[int] = '''weight''' elif "running_mean" in name: UpperCAmelCase__ : Optional[int] = '''running_mean''' elif "running_var" in name: UpperCAmelCase__ : List[Any] = '''running_var''' elif "num_batches_tracked" in name: UpperCAmelCase__ : Optional[Any] = '''num_batches_tracked''' else: UpperCAmelCase__ : Union[str, Any] = None set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) continue if not is_used: unused_weights.append(lowerCAmelCase__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str: UpperCAmelCase__ : Optional[int] = full_name.split('''conv_layers.''' )[-1] UpperCAmelCase__ : Optional[Any] = name.split('''.''' ) UpperCAmelCase__ : Any = int(items[0] ) UpperCAmelCase__ : Optional[int] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) UpperCAmelCase__ : Any = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) UpperCAmelCase__ : int = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) UpperCAmelCase__ : List[str] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) UpperCAmelCase__ : Union[str, Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowerCAmelCase__ ) @torch.no_grad() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , 
lowerCAmelCase__=None , lowerCAmelCase__=None , ) -> Any: if config_path is not None: UpperCAmelCase__ : Optional[Any] = SpeechTaConfig.from_pretrained(lowerCAmelCase__ ) else: UpperCAmelCase__ : str = SpeechTaConfig() if task == "s2t": UpperCAmelCase__ : str = config.max_text_positions UpperCAmelCase__ : List[str] = SpeechTaForSpeechToText(lowerCAmelCase__ ) elif task == "t2s": UpperCAmelCase__ : Tuple = 18_76 UpperCAmelCase__ : int = 6_00 UpperCAmelCase__ : Union[str, Any] = config.max_speech_positions UpperCAmelCase__ : Optional[Any] = SpeechTaForTextToSpeech(lowerCAmelCase__ ) elif task == "s2s": UpperCAmelCase__ : Tuple = 18_76 UpperCAmelCase__ : Optional[Any] = config.max_speech_positions UpperCAmelCase__ : Dict = SpeechTaForSpeechToSpeech(lowerCAmelCase__ ) else: raise ValueError(F"""Unknown task name: {task}""" ) if vocab_path: UpperCAmelCase__ : Tuple = SpeechTaTokenizer(lowerCAmelCase__ , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it UpperCAmelCase__ : Dict = AddedToken('''<mask>''' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) UpperCAmelCase__ : int = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) UpperCAmelCase__ : Optional[Any] = SpeechTaFeatureExtractor() UpperCAmelCase__ : Any = SpeechTaProcessor(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ ) processor.save_pretrained(lowerCAmelCase__ ) UpperCAmelCase__ : List[str] = torch.load(lowerCAmelCase__ ) recursively_load_weights(fairseq_checkpoint['''model'''] , lowerCAmelCase__ , lowerCAmelCase__ ) model.save_pretrained(lowerCAmelCase__ ) if repo_id: print('''Pushing to the hub...''' ) processor.push_to_hub(lowerCAmelCase__ ) model.push_to_hub(lowerCAmelCase__ ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument( '''--task''', default='''s2t''', type=str, help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) UpperCamelCase__ = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
299
1
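The heart of the conversion script above is wildcard key remapping: a fairseq name such as decoder.layers.3.fc1 must land on the HF name with 3 substituted for the *. A small sketch of just that substitution step (function name illustrative; the record recovers the layer index with a slightly different split):

def map_key(name, mapping):
    """Return the HF parameter name for a fairseq name, or None if unmapped."""
    for key, mapped_key in mapping.items():
        if "*" in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                # Recover the layer index that sat where the wildcard was.
                layer_index = name.split(prefix + ".")[1].split(".")[0]
                return mapped_key.replace("*", layer_index)
        elif key in name:
            return mapped_key
    return None

mapping = {"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense"}
assert map_key("decoder.layers.3.fc1.weight", mapping) == (
    "speecht5.decoder.wrapped_decoder.layers.3.feed_forward.intermediate_dense"
)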
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


UpperCamelCase__ = logging.get_logger(__name__)


class lowerCamelCase_ ( __a ):
    def __init__( self : str , *_A : Tuple , **_A : int ):
        '''simple docstring'''
        warnings.warn(
            '''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DPTImageProcessor instead.''' , _A , )
        super().__init__(*_A , **_A )
299
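The record above is the standard transformers deprecation shim: the old class name survives as a subclass that emits a warning and defers everything to the new image processor. A generic sketch of the pattern, with illustrative class names:

import warnings

class NewImageProcessor:
    def __init__(self, *args, **kwargs):
        self.args, self.kwargs = args, kwargs

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        # Old entry point still works, but nudges callers toward the new class.
        warnings.warn(
            "OldFeatureExtractor is deprecated and will be removed; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)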
'''simple docstring''' import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''label_embs_concat''': '''label_embeddings_concat''', '''mask_emb''': '''masked_spec_embed''', '''spk_proj''': '''speaker_proj''', } UpperCamelCase__ = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', '''label_embeddings_concat''', '''speaker_proj''', '''layer_norm_for_extract''', ] def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]: for attribute in key.split('''.''' ): UpperCAmelCase__ : Optional[int] = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) if weight_type is not None: UpperCAmelCase__ : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape else: UpperCAmelCase__ : Union[str, Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCAmelCase__ : int = value elif weight_type == "weight_g": UpperCAmelCase__ : Dict = value elif weight_type == "weight_v": UpperCAmelCase__ : List[str] = value elif weight_type == "bias": UpperCAmelCase__ : Tuple = value else: UpperCAmelCase__ : Tuple = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : Dict = fairseq_model.state_dict() UpperCAmelCase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase__ : Any = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , ) UpperCAmelCase__ : str = True else: for key, mapped_key in MAPPING.items(): UpperCAmelCase__ : List[str] = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue UpperCAmelCase__ : Optional[int] = True if "*" in mapped_key: UpperCAmelCase__ : str = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2] UpperCAmelCase__ : Optional[int] = mapped_key.replace('''*''' , lowerCAmelCase__ ) if "weight_g" in name: UpperCAmelCase__ : List[str] = '''weight_g''' elif "weight_v" in name: UpperCAmelCase__ : Dict = '''weight_v''' elif "bias" in name: UpperCAmelCase__ : Optional[int] = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase__ : Tuple = '''weight''' else: UpperCAmelCase__ : Optional[Any] = None set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) continue if not is_used: unused_weights.append(lowerCAmelCase__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]: UpperCAmelCase__ : Tuple = full_name.split('''conv_layers.''' )[-1] UpperCAmelCase__ : Optional[Any] = name.split('''.''' ) UpperCAmelCase__ : Union[str, Any] = int(items[0] ) UpperCAmelCase__ : Tuple = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) UpperCAmelCase__ : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) UpperCAmelCase__ : Optional[int] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) UpperCAmelCase__ : List[str] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, 
but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) UpperCAmelCase__ : Optional[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowerCAmelCase__ ) @torch.no_grad() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True ) -> Any: if config_path is not None: UpperCAmelCase__ : Any = UniSpeechSatConfig.from_pretrained(lowerCAmelCase__ ) else: UpperCAmelCase__ : int = UniSpeechSatConfig() UpperCAmelCase__ : Tuple = '''''' if is_finetuned: UpperCAmelCase__ : Optional[int] = UniSpeechSatForCTC(lowerCAmelCase__ ) else: UpperCAmelCase__ : List[Any] = UniSpeechSatForPreTraining(lowerCAmelCase__ ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) UpperCAmelCase__ : Union[str, Any] = model[0].eval() recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ ) hf_wavavec.save_pretrained(lowerCAmelCase__ ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) UpperCamelCase__ = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
299
1
'''simple docstring'''


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
    print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
    for i in range(lowerCAmelCase__ ):
        for j in range(lowerCAmelCase__ ):
            if dist[i][j] != float('''inf''' ):
                print(int(dist[i][j] ) , end='''\t''' )
            else:
                print('''INF''' , end='''\t''' )
        print()


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
    UpperCAmelCase__ : Optional[int] = [[float('''inf''' ) for _ in range(lowerCAmelCase__ )] for _ in range(lowerCAmelCase__ )]
    for i in range(lowerCAmelCase__ ):
        for j in range(lowerCAmelCase__ ):
            UpperCAmelCase__ : Optional[int] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(lowerCAmelCase__ ):
        # looping through rows of graph array
        for i in range(lowerCAmelCase__ ):
            # looping through columns of graph array
            for j in range(lowerCAmelCase__ ):
                if (
                    dist[i][k] != float('''inf''' )
                    and dist[k][j] != float('''inf''' )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    UpperCAmelCase__ : List[str] = dist[i][k] + dist[k][j]
    _print_dist(lowerCAmelCase__ , lowerCAmelCase__ )
    return dist, v


if __name__ == "__main__":
    UpperCamelCase__ = int(input('''Enter number of vertices: '''))
    UpperCamelCase__ = int(input('''Enter number of edges: '''))
    UpperCamelCase__ = [[float('''inf''') for i in range(v)] for j in range(v)]
    for i in range(v):
        UpperCamelCase__ = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('''\nEdge ''', i + 1)
        UpperCamelCase__ = int(input('''Enter source:'''))
        UpperCamelCase__ = int(input('''Enter destination:'''))
        UpperCamelCase__ = float(input('''Enter weight:'''))
        UpperCamelCase__ = weight
    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
299
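A deterministic check of the Floyd-Warshall relaxation above, replacing the interactive input with a hard-coded 3-vertex adjacency matrix (function name illustrative):

INF = float("inf")

def floyd_warshall(graph):
    v = len(graph)
    dist = [row[:] for row in graph]  # work on a copy of the adjacency matrix
    for k in range(v):
        for i in range(v):
            for j in range(v):
                # float('inf') arithmetic is safe in Python, so no explicit guard is needed
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
    return dist

graph = [
    [0, 2, INF],
    [INF, 0, 3],
    [1, INF, 0],
]
dist = floyd_warshall(graph)
assert dist[0][2] == 5  # path 0 -> 1 -> 2
assert dist[2][1] == 3  # path 2 -> 0 -> 1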
'''simple docstring''' import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin UpperCamelCase__ = random.Random() if is_torch_available(): import torch def a__ ( lowerCAmelCase__ , lowerCAmelCase__=1.0 , lowerCAmelCase__=None , lowerCAmelCase__=None ) -> Optional[Any]: if rng is None: UpperCAmelCase__ : List[str] = global_rng UpperCAmelCase__ : Optional[Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class lowerCamelCase_ ( unittest.TestCase ): def __init__( self : Any , _A : List[str] , _A : int=7 , _A : Dict=400 , _A : Tuple=2_000 , _A : Optional[int]=1 , _A : List[Any]=0.0 , _A : Any=16_000 , _A : int=True , _A : str=True , ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = parent UpperCAmelCase__ : str = batch_size UpperCAmelCase__ : Dict = min_seq_length UpperCAmelCase__ : str = max_seq_length UpperCAmelCase__ : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCAmelCase__ : Optional[Any] = feature_size UpperCAmelCase__ : int = padding_value UpperCAmelCase__ : int = sampling_rate UpperCAmelCase__ : Tuple = return_attention_mask UpperCAmelCase__ : str = do_normalize def lowercase_ ( self : Optional[int] ): '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowercase_ ( self : int , _A : Optional[Any]=False , _A : Any=False ): '''simple docstring''' def _flatten(_A : Union[str, Any] ): return list(itertools.chain(*_A ) ) if equal_length: UpperCAmelCase__ : Tuple = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size UpperCAmelCase__ : Optional[int] = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: UpperCAmelCase__ : Dict = [np.asarray(_A ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = ASTFeatureExtractor def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : int = ASTFeatureExtractionTester(self ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 UpperCAmelCase__ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] UpperCAmelCase__ : List[Any] = [np.asarray(_A ) for speech_input in speech_inputs] # Test not batched input UpperCAmelCase__ : str = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values UpperCAmelCase__ : List[Any] = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) ) # Test batched UpperCAmelCase__ : Optional[Any] = feat_extract(_A , padding=_A , return_tensors='''np''' ).input_values UpperCAmelCase__ : Optional[int] = feat_extract(_A , padding=_A , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_A , _A ): 
self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. UpperCAmelCase__ : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)] UpperCAmelCase__ : Any = np.asarray(_A ) UpperCAmelCase__ : int = feat_extract(_A , return_tensors='''np''' ).input_values UpperCAmelCase__ : List[str] = feat_extract(_A , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_A , _A ): self.assertTrue(np.allclose(_A , _A , atol=1e-3 ) ) @require_torch def lowercase_ ( self : List[str] ): '''simple docstring''' import torch UpperCAmelCase__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCAmelCase__ : Any = np.random.rand(100 ).astype(np.floataa ) UpperCAmelCase__ : int = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: UpperCAmelCase__ : str = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) UpperCAmelCase__ : Any = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def lowercase_ ( self : int , _A : List[Any] ): '''simple docstring''' from datasets import load_dataset UpperCAmelCase__ : Tuple = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech UpperCAmelCase__ : List[Any] = ds.sort('''id''' ).select(range(_A ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] @require_torch def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Any = torch.tensor( [-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6, -1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3, -1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6, -0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] ) # fmt: on UpperCAmelCase__ : Optional[Any] = self._load_datasamples(1 ) UpperCAmelCase__ : Optional[int] = ASTFeatureExtractor() UpperCAmelCase__ : Dict = feature_extractor(_A , return_tensors='''pt''' ).input_values self.assertEquals(input_values.shape , (1, 1_024, 128) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , _A , atol=1e-4 ) )
299
1
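The feature-extractor test above relies on a helper that fabricates batches of float sequences whose lengths grow linearly across the batch, so both the padding and batching paths get exercised. A minimal de-obfuscated sketch, assuming the hypothetical names floats_list and prepare_inputs:

import itertools
import random

rng = random.Random(0)


def floats_list(shape, scale=1.0):
    # Nested lists of uniform floats in [0, scale); shape is (batch, length).
    return [[rng.random() * scale for _ in range(shape[1])] for _ in range(shape[0])]


def prepare_inputs(batch_size=7, min_seq_length=400, max_seq_length=2000):
    # Sequence lengths grow linearly from min to max across the batch.
    step = (max_seq_length - min_seq_length) // (batch_size - 1)
    return [
        list(itertools.chain(*floats_list((length, 1))))
        for length in range(min_seq_length, max_seq_length, step)
    ]


if __name__ == "__main__":
    print([len(s) for s in prepare_inputs()])  # [400, 666, 932, ...]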
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = tempfile.mkdtemp() # fmt: off UpperCAmelCase__ : Union[str, Any] = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on UpperCAmelCase__ : Optional[int] = dict(zip(_A , range(len(_A ) ) ) ) UpperCAmelCase__ : Union[str, Any] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] UpperCAmelCase__ : Optional[int] = {'''unk_token''': '''<unk>'''} UpperCAmelCase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_A ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_A ) ) UpperCAmelCase__ : Optional[Any] = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } UpperCAmelCase__ : Dict = os.path.join(self.tmpdirname , _A ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_A , _A ) def lowercase_ ( self : List[Any] , **_A : Dict ): '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , **_A ) def lowercase_ ( self : Dict , **_A : str ): '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_A ) def lowercase_ ( self : List[Any] , **_A : List[Any] ): '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **_A ) def lowercase_ ( self : Any ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] UpperCAmelCase__ : Optional[Any] = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.get_tokenizer() UpperCAmelCase__ : Optional[int] = self.get_rust_tokenizer() UpperCAmelCase__ : str = self.get_image_processor() UpperCAmelCase__ : str = CLIPSegProcessor(tokenizer=_A , image_processor=_A ) processor_slow.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : Any = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=_A ) UpperCAmelCase__ : Tuple = CLIPSegProcessor(tokenizer=_A , image_processor=_A ) processor_fast.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ) 
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _A ) self.assertIsInstance(processor_fast.tokenizer , _A ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , _A ) self.assertIsInstance(processor_fast.image_processor , _A ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : str = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : Optional[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) UpperCAmelCase__ : Optional[Any] = self.get_image_processor(do_normalize=_A , padding_value=1.0 ) UpperCAmelCase__ : Optional[int] = CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _A ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.get_image_processor() UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : Tuple = CLIPSegProcessor(tokenizer=_A , image_processor=_A ) UpperCAmelCase__ : int = self.prepare_image_inputs() UpperCAmelCase__ : int = image_processor(_A , return_tensors='''np''' ) UpperCAmelCase__ : str = processor(images=_A , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.get_image_processor() UpperCAmelCase__ : List[Any] = self.get_tokenizer() UpperCAmelCase__ : Dict = CLIPSegProcessor(tokenizer=_A , image_processor=_A ) UpperCAmelCase__ : Tuple = '''lower newer''' UpperCAmelCase__ : Dict = processor(text=_A ) UpperCAmelCase__ : Any = tokenizer(_A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_image_processor() UpperCAmelCase__ : Any = self.get_tokenizer() UpperCAmelCase__ : List[str] = CLIPSegProcessor(tokenizer=_A , image_processor=_A ) UpperCAmelCase__ : Dict = '''lower newer''' UpperCAmelCase__ : Optional[Any] = self.prepare_image_inputs() UpperCAmelCase__ : str = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(_A ): processor() def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.get_image_processor() UpperCAmelCase__ : Any = self.get_tokenizer() UpperCAmelCase__ : Optional[int] = CLIPSegProcessor(tokenizer=_A , image_processor=_A ) UpperCAmelCase__ : List[str] = self.prepare_image_inputs() 
UpperCAmelCase__ : int = self.prepare_image_inputs() UpperCAmelCase__ : Optional[int] = processor(images=_A , visual_prompt=_A ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(_A ): processor() def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_image_processor() UpperCAmelCase__ : Tuple = self.get_tokenizer() UpperCAmelCase__ : List[Any] = CLIPSegProcessor(tokenizer=_A , image_processor=_A ) UpperCAmelCase__ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCAmelCase__ : List[Any] = processor.batch_decode(_A ) UpperCAmelCase__ : Union[str, Any] = tokenizer.batch_decode(_A ) self.assertListEqual(_A , _A )
299
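The property under test above is a serialization round-trip: whatever a processor writes with save_pretrained, from_pretrained must restore, for both the tokenizer and the image-processor halves. A minimal sketch of that invariant; the roundtrip helper name is mine:

import tempfile

from transformers import CLIPSegProcessor


def roundtrip(processor: CLIPSegProcessor) -> CLIPSegProcessor:
    # Serialize tokenizer + image-processor config to disk and reload them.
    with tempfile.TemporaryDirectory() as tmp_dir:
        processor.save_pretrained(tmp_dir)
        return CLIPSegProcessor.from_pretrained(tmp_dir)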
'''simple docstring''' import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = 0 @slow def lowercase_ ( self : Dict ): '''simple docstring''' for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(_A ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(_A ) self.assertIsNotNone(_A ) self.assertIsInstance(_A , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(_A ) , 0 ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 20 ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Tuple = AutoConfig.from_pretrained(_A ) self.assertIsInstance(_A , _A ) # Check that tokenizer_type ≠ model_type UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(_A , config=_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def lowercase_ ( self : str ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_A , '''vocab.txt''' ) ) UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(_A , tokenizer_type='''bert''' , use_fast=_A ) self.assertIsInstance(_A , _A ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_A , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_A , '''merges.txt''' ) ) UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A , tokenizer_type='''gpt2''' , use_fast=_A ) self.assertIsInstance(_A , _A ) @require_tokenizers def lowercase_ ( self : str ): '''simple docstring''' 
with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_A , '''vocab.txt''' ) ) UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(_A , tokenizer_type='''bert''' ) self.assertIsInstance(_A , _A ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_A , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_A , '''merges.txt''' ) ) UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(_A , tokenizer_type='''gpt2''' ) self.assertIsInstance(_A , _A ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' with pytest.raises(_A ): AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' ) @require_tokenizers def lowercase_ ( self : int ): '''simple docstring''' for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: UpperCAmelCase__ : Optional[int] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) if isinstance(_A , _A ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _A ) else: self.assertEqual(tokenizer.do_lower_case , _A ) self.assertEqual(tokenizer.model_max_length , 512 ) @require_tokenizers def lowercase_ ( self : List[str] ): '''simple docstring''' for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( _A , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ): UpperCAmelCase__ : Dict = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TOKENIZER_MAPPING.values() UpperCAmelCase__ : Any = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(_A ) @require_tokenizers def lowercase_ ( self : Optional[int] ): '''simple docstring''' self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=_A ) , _A ) self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , _A ) @require_tokenizers def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : int = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=_A ) UpperCAmelCase__ : Any = '''Hello, world. 
How are you?''' UpperCAmelCase__ : Dict = tokenizer.tokenize(_A ) self.assertEqual('''[UNK]''' , tokens[0] ) UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=_A ) UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize(_A ) self.assertEqual('''[UNK]''' , tokens[0] ) @require_tokenizers def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : List[str] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' ) self.assertEqual(type(_A ) , _A ) self.assertEqual(tokenizer.model_max_length , 512 ) self.assertEqual(tokenizer.vocab_size , 30_000 ) self.assertEqual(tokenizer.unk_token , '''[UNK]''' ) self.assertEqual(tokenizer.padding_side , '''right''' ) self.assertEqual(tokenizer.truncation_side , '''right''' ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 12 ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''ctrl''' ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(_A , _A ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : str = get_tokenizer_config('''bert-base-cased''' ) UpperCAmelCase__ : Optional[int] = config.pop('''_commit_hash''' , _A ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(_A , {'''do_lower_case''': False} ) # This model does not have a tokenizer_config so we get back an empty dict. UpperCAmelCase__ : Tuple = get_tokenizer_config(_A ) self.assertDictEqual(_A , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = get_tokenizer_config(_A ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' ) def lowercase_ ( self : Dict ): '''simple docstring''' try: AutoConfig.register('''custom''' , _A ) AutoTokenizer.register(_A , slow_tokenizer_class=_A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A ): AutoTokenizer.register(_A , slow_tokenizer_class=_A ) UpperCAmelCase__ : Optional[int] = CustomTokenizer.from_pretrained(_A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , _A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def lowercase_ ( self : Any ): '''simple docstring''' try: AutoConfig.register('''custom''' , _A ) # Can register in two steps AutoTokenizer.register(_A , slow_tokenizer_class=_A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(_A , fast_tokenizer_class=_A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( _A , slow_tokenizer_class=_A , fast_tokenizer_class=_A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A ): AutoTokenizer.register(_A , fast_tokenizer_class=_A ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase__ : Any = BertTokenizerFast.from_pretrained(_A ) bert_tokenizer.save_pretrained(_A ) UpperCAmelCase__ : Optional[int] = CustomTokenizerFast.from_pretrained(_A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A ) self.assertIsInstance(_A , _A ) UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(_A , use_fast=_A ) self.assertIsInstance(_A , _A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowercase_ ( self : Optional[int] ): '''simple docstring''' with self.assertRaises(_A ): UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(_A ): UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_A ) UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A , use_fast=_A ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) @require_tokenizers def lowercase_ ( self : int ): '''simple docstring''' class lowerCamelCase_ ( __a ): lowerCAmelCase__ = False class lowerCamelCase_ ( __a ): lowerCAmelCase__ = NewTokenizer lowerCAmelCase__ = False try: AutoConfig.register('''custom''' , _A ) AutoTokenizer.register(_A , slow_tokenizer_class=_A ) AutoTokenizer.register(_A , fast_tokenizer_class=_A ) # If remote code is not set, the default is to use local UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) UpperCAmelCase__ : str = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertTrue(tokenizer.special_attribute_present ) UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version UpperCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A , use_fast=_A ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def lowercase_ ( self : Tuple ): '''simple docstring''' with self.assertRaisesRegex( _A , '''bert-base is not a local folder and is not a valid model identifier''' ): UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' ) def lowercase_ ( self : Dict ): '''simple docstring''' with self.assertRaisesRegex( _A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A , revision='''aaaaaa''' ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
299
1
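The local-files tests above exercise AutoTokenizer's tokenizer_type escape hatch, which skips config lookup and routes straight to a tokenizer class. A small sketch of the same flow, with a made-up five-token vocabulary:

import os
import tempfile

from transformers import AutoTokenizer

# Load a BERT tokenizer from a bare vocab.txt via tokenizer_type; the tiny
# vocabulary here is fabricated for illustration.
with tempfile.TemporaryDirectory() as tmp_dir:
    with open(os.path.join(tmp_dir, "vocab.txt"), "w", encoding="utf-8") as f:
        f.write("\n".join(["[UNK]", "[CLS]", "[SEP]", "hello", "world"]))
    tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
    print(tokenizer.tokenize("hello world"))  # ['hello', 'world']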
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70_000):
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
299
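For reference, the update implemented in logistic_reg above is batch gradient descent on the binary cross-entropy; with design matrix $X$, labels $y$, and sigmoid $\sigma$:

$$J(\theta) = -\frac{1}{m}\sum_{i=1}^{m}\Big[y_i \log \sigma(x_i^\top \theta) + (1 - y_i)\log\big(1 - \sigma(x_i^\top \theta)\big)\Big], \qquad \nabla_\theta J = \frac{1}{m}\,X^\top\big(\sigma(X\theta) - y\big).$$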
'''simple docstring'''


def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
299
1
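The quantity computed above is the standard $\Lambda$CDM Hubble parameter at redshift $z$; in the notation of the function:

$$H(z) = H_0 \sqrt{\Omega_r (1+z)^4 + \Omega_m (1+z)^3 + \Omega_k (1+z)^2 + \Omega_\Lambda}, \qquad \Omega_k = 1 - \big(\Omega_m + \Omega_r + \Omega_\Lambda\big).$$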
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase__ = { '''configuration_conditional_detr''': [ '''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConditionalDetrConfig''', '''ConditionalDetrOnnxConfig''', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = ['''ConditionalDetrFeatureExtractor'''] UpperCamelCase__ = ['''ConditionalDetrImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ '''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ConditionalDetrForObjectDetection''', '''ConditionalDetrForSegmentation''', '''ConditionalDetrModel''', '''ConditionalDetrPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
299
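The init file above follows the lazy-import convention: symbols are declared in _import_structure and resolved only on first attribute access, so importing the package stays cheap. A stripped-down imitation of the idea, not transformers' actual _LazyModule:

import importlib
import types


class LazyModule(types.ModuleType):
    # Map each exported attribute to the submodule that defines it, and
    # import that submodule lazily the first time the attribute is read.
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr):
        # Called only when normal lookup fails, i.e. on first access.
        submodule = self._attr_to_submodule.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(f".{submodule}", self.__name__), attr)
        setattr(self, attr, value)  # cache so later lookups are plain dict hits
        return value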
'''simple docstring''' import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin UpperCamelCase__ = logging.get_logger(__name__) enable_full_determinism() class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = UNetaDModel lowerCAmelCase__ = 'sample' @property def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 4 UpperCAmelCase__ : str = 3 UpperCAmelCase__ : str = (32, 32) UpperCAmelCase__ : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : Tuple = torch.tensor([10] ).to(_A ) return {"sample": noise, "timestep": time_step} @property def lowercase_ ( self : int ): '''simple docstring''' return (3, 32, 32) @property def lowercase_ ( self : Dict ): '''simple docstring''' return (3, 32, 32) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = { '''block_out_channels''': (32, 64), '''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''), '''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''), '''attention_head_dim''': 3, '''out_channels''': 3, '''in_channels''': 3, '''layers_per_block''': 2, '''sample_size''': 32, } UpperCAmelCase__ : Tuple = self.dummy_input return init_dict, inputs_dict class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = UNetaDModel lowerCAmelCase__ = 'sample' @property def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = 4 UpperCAmelCase__ : Dict = 4 UpperCAmelCase__ : List[str] = (32, 32) UpperCAmelCase__ : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : List[Any] = torch.tensor([10] ).to(_A ) return {"sample": noise, "timestep": time_step} @property def lowercase_ ( self : Tuple ): '''simple docstring''' return (4, 32, 32) @property def lowercase_ ( self : List[str] ): '''simple docstring''' return (4, 32, 32) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[str] = { '''sample_size''': 32, '''in_channels''': 4, '''out_channels''': 4, '''layers_per_block''': 2, '''block_out_channels''': (32, 64), '''attention_head_dim''': 32, '''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''), '''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''), } UpperCAmelCase__ : Optional[Any] = self.dummy_input return init_dict, inputs_dict def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(_A ) UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Any = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A ) model.to(_A ) UpperCAmelCase__ : Dict = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' ) def lowercase_ ( 
self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A ) model_accelerate.to(_A ) model_accelerate.eval() UpperCAmelCase__ : Tuple = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) UpperCAmelCase__ : Union[str, Any] = noise.to(_A ) UpperCAmelCase__ : Optional[Any] = torch.tensor([10] * noise.shape[0] ).to(_A ) UpperCAmelCase__ : Any = model_accelerate(_A , _A )['''sample'''] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() UpperCAmelCase__ , UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained( '''fusing/unet-ldm-dummy-update''' , output_loading_info=_A , low_cpu_mem_usage=_A ) model_normal_load.to(_A ) model_normal_load.eval() UpperCAmelCase__ : Optional[int] = model_normal_load(_A , _A )['''sample'''] assert torch_all_close(_A , _A , rtol=1e-3 ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' ) model.eval() model.to(_A ) UpperCAmelCase__ : Union[str, Any] = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) UpperCAmelCase__ : str = noise.to(_A ) UpperCAmelCase__ : str = torch.tensor([10] * noise.shape[0] ).to(_A ) with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample UpperCAmelCase__ : List[Any] = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off UpperCAmelCase__ : Tuple = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] ) # fmt: on self.assertTrue(torch_all_close(_A , _A , rtol=1e-3 ) ) class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = UNetaDModel lowerCAmelCase__ = 'sample' @property def lowercase_ ( self : Any , _A : str=(32, 32) ): '''simple docstring''' UpperCAmelCase__ : Tuple = 4 UpperCAmelCase__ : List[str] = 3 UpperCAmelCase__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : Dict = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_A ) return {"sample": noise, "timestep": time_step} @property def lowercase_ ( self : List[str] ): '''simple docstring''' return (3, 32, 32) @property def lowercase_ ( self : List[Any] ): '''simple docstring''' return (3, 32, 32) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[str] = { '''block_out_channels''': [32, 64, 64, 64], '''in_channels''': 3, '''layers_per_block''': 1, '''out_channels''': 3, '''time_embedding_type''': '''fourier''', '''norm_eps''': 1e-6, '''mid_block_scale_factor''': math.sqrt(2.0 ), '''norm_num_groups''': None, '''down_block_types''': [ '''SkipDownBlock2D''', '''AttnSkipDownBlock2D''', '''SkipDownBlock2D''', '''SkipDownBlock2D''', ], '''up_block_types''': [ '''SkipUpBlock2D''', '''SkipUpBlock2D''', '''AttnSkipUpBlock2D''', '''SkipUpBlock2D''', ], } UpperCAmelCase__ : Tuple = self.dummy_input return init_dict, inputs_dict @slow def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : str = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_A ) self.assertIsNotNone(_A ) 
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(_A ) UpperCAmelCase__ : List[str] = self.dummy_input UpperCAmelCase__ : Dict = floats_tensor((4, 3) + (256, 256) ).to(_A ) UpperCAmelCase__ : Optional[Any] = noise UpperCAmelCase__ : Any = model(**_A ) assert image is not None, "Make sure output is not None" @slow def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : int = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' ) model.to(_A ) UpperCAmelCase__ : Optional[Any] = 4 UpperCAmelCase__ : List[str] = 3 UpperCAmelCase__ : Dict = (256, 256) UpperCAmelCase__ : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : Union[str, Any] = torch.tensor(batch_size * [1e-4] ).to(_A ) with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(_A , _A ).sample UpperCAmelCase__ : Any = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off UpperCAmelCase__ : Tuple = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] ) # fmt: on self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' ) model.to(_A ) UpperCAmelCase__ : str = 4 UpperCAmelCase__ : Any = 3 UpperCAmelCase__ : int = (32, 32) UpperCAmelCase__ : Optional[Any] = torch.ones((batch_size, num_channels) + sizes ).to(_A ) UpperCAmelCase__ : Optional[Any] = torch.tensor(batch_size * [1e-4] ).to(_A ) with torch.no_grad(): UpperCAmelCase__ : int = model(_A , _A ).sample UpperCAmelCase__ : Dict = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off UpperCAmelCase__ : Any = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] ) # fmt: on self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) ) def lowercase_ ( self : Tuple ): '''simple docstring''' pass
299
1
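The slow tests above use a golden-value pattern: run the model deterministically, flatten a fixed slice of the output, and compare against hard-coded numbers. A generic sketch of that check; the helper name and the particular slice layout are assumptions:

import torch


def check_output_slice(output: torch.Tensor, expected_slice: torch.Tensor, rtol: float = 1e-3) -> None:
    # Compare a small corner of the output against stored golden values.
    got = output[0, -3:, -3:, -1].flatten().cpu()
    if not torch.allclose(got, expected_slice, rtol=rtol):
        raise AssertionError(f"max abs diff: {(got - expected_slice).abs().max().item():.3e}")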
'''simple docstring''' from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record UpperCamelCase__ = '''\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } ''' UpperCamelCase__ = '''\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. ''' UpperCamelCase__ = ''' Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for \'record\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'prediction_text\': the predicted answer text - for \'multirc\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question-answer pair as specified by the dataset - \'prediction\': the predicted answer label - otherwise: list of predicted labels references: list of reference labels. Depending on the SuperGLUE subset: - for \'record\': list of question-answers dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'answers\': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for \'record\': - \'exact_match\': Exact match between answer and gold answer - \'f1\': F1 score - for \'multirc\': - \'exact_match\': Exact match between answer and gold answer - \'f1_m\': Per-question macro-F1 score - \'f1_a\': Average F1 score over all answers - for \'axb\': \'matthews_correlation\': Matthew Correlation - for \'cb\': - \'accuracy\': Accuracy - \'f1\': F1 score - for all others: - \'accuracy\': Accuracy Examples: >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\') >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}] >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\') >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0} 
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'matthews_correlation\': 1.0} ''' def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]: return float((preds == labels).mean() ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="binary" ) -> str: UpperCAmelCase__ : Any = simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase__ : Any = float(fa_score(y_true=lowerCAmelCase__ , y_pred=lowerCAmelCase__ , average=lowerCAmelCase__ ) ) return { "accuracy": acc, "f1": fa, } def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple: UpperCAmelCase__ : Tuple = {} for id_pred, label in zip(lowerCAmelCase__ , lowerCAmelCase__ ): UpperCAmelCase__ : Union[str, Any] = F"""{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}""" UpperCAmelCase__ : int = id_pred['''prediction'''] if question_id in question_map: question_map[question_id].append((pred, label) ) else: UpperCAmelCase__ : List[Any] = [(pred, label)] UpperCAmelCase__ , UpperCAmelCase__ : Any = [], [] for question, preds_labels in question_map.items(): UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = zip(*lowerCAmelCase__ ) UpperCAmelCase__ : Dict = fa_score(y_true=lowerCAmelCase__ , y_pred=lowerCAmelCase__ , average='''macro''' ) fas.append(lowerCAmelCase__ ) UpperCAmelCase__ : Optional[int] = int(sum(pred == label for pred, label in preds_labels ) == len(lowerCAmelCase__ ) ) ems.append(lowerCAmelCase__ ) UpperCAmelCase__ : Any = float(sum(lowerCAmelCase__ ) / len(lowerCAmelCase__ ) ) UpperCAmelCase__ : List[Any] = sum(lowerCAmelCase__ ) / len(lowerCAmelCase__ ) UpperCAmelCase__ : int = float(fa_score(y_true=lowerCAmelCase__ , y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): def lowercase_ ( self : Tuple ): '''simple docstring''' if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( '''You should supply a configuration name selected in ''' '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , ) def lowercase_ ( self : str ): '''simple docstring''' if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value('''int64''' ), "query": datasets.Value('''int64''' ), }, "prediction_text": datasets.Value('''string''' ), }, "references": { "idx": { "passage": datasets.Value('''int64''' ), "query": datasets.Value('''int64''' ), }, "answers": datasets.Sequence(datasets.Value('''string''' ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value('''int64''' ), "paragraph": datasets.Value('''int64''' ), "question": datasets.Value('''int64''' ), }, "prediction": datasets.Value('''int64''' ), }, "references": datasets.Value('''int64''' ), } else: return { "predictions": datasets.Value('''int64''' ), 
"references": datasets.Value('''int64''' ), } def lowercase_ ( self : str , _A : Dict , _A : List[Any] ): '''simple docstring''' if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(_A , _A )} elif self.config_name == "cb": return acc_and_fa(_A , _A , fa_avg='''macro''' ) elif self.config_name == "record": UpperCAmelCase__ : Optional[int] = [ { '''qas''': [ {'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]} for ref in references ] } ] UpperCAmelCase__ : List[Any] = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions} return evaluate_record(_A , _A )[0] elif self.config_name == "multirc": return evaluate_multirc(_A , _A ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(_A , _A )} else: raise KeyError( '''You should supply a configuration name selected in ''' '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
299
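De-obfuscated, the MultiRC branch above groups answer-level predictions by question, averages per-question macro-F1 (f1_m) and exact match, and adds an answer-level F1 (f1_a). A sketch under those assumptions:

from collections import defaultdict

from sklearn.metrics import f1_score


def evaluate_multirc(ids_preds, labels):
    # Each prediction carries an "idx" (paragraph/question/answer) and a
    # 0/1 "prediction"; group answer-level pairs by question.
    by_question = defaultdict(list)
    for id_pred, label in zip(ids_preds, labels):
        key = (id_pred["idx"]["paragraph"], id_pred["idx"]["question"])
        by_question[key].append((id_pred["prediction"], label))

    f1s, ems = [], []
    for pairs in by_question.values():
        preds, golds = zip(*pairs)
        f1s.append(f1_score(y_true=golds, y_pred=preds, average="macro"))
        ems.append(int(all(p == g for p, g in pairs)))

    answer_preds = [id_pred["prediction"] for id_pred in ids_preds]
    return {
        "exact_match": sum(ems) / len(ems),
        "f1_m": sum(f1s) / len(f1s),
        "f1_a": float(f1_score(y_true=labels, y_pred=answer_preds)),
    }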
'''simple docstring'''

from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
299
1
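A quick usage check of the greedy routine above; the item values and weights are made up for illustration:

value = [60, 100, 120]
weight = [10, 20, 30]
max_value, fractions = fractional_knapsack(value, weight, capacity=50)
print(max_value)  # 240.0 -- items 0 and 1 in full, two thirds of item 2
print(fractions)  # [1, 1, 0.6666666666666666]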
'''simple docstring'''


def solution(n: int = 1_000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
299
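Hedged aside: the summand above matches the closed form usually quoted for Project Euler problem 120, where for $a \ge 3$ the maximum of $(a-1)^n + (a+1)^n \bmod a^2$ over $n$ is

$$r_{\max}(a) = 2a\left\lfloor\frac{a-1}{2}\right\rfloor,$$

so the function sums $r_{\max}(a)$ for $3 \le a \le n$.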
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : int , *_A : Tuple , **_A : Union[str, Any] ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : Union[str, Any] , *_A : List[Any] , **_A : Any ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : int , *_A : Union[str, Any] , **_A : Optional[Any] ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : Any , *_A : List[str] , **_A : Tuple ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : Tuple , *_A : Tuple , **_A : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : List[Any] , *_A : List[str] , **_A : Dict ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : Dict , *_A : Any , **_A : int ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : List[Any] , *_A : List[Any] , **_A : Optional[int] ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : int , *_A : Dict , **_A : Optional[Any] ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *_A : Optional[int] , **_A : Optional[Any] ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : Dict , *_A : Any , **_A : Tuple ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : int , *_A : Union[str, Any] , **_A : Dict ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *_A : Optional[int] , **_A : Dict ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : Dict , *_A : str , **_A : Dict ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : Union[str, Any] , *_A : Optional[int] , **_A : int ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase_ ( metaclass=__a ): lowerCAmelCase__ = ['torch', 'transformers', 'onnx'] def __init__( self : Optional[Any] , *_A : Union[str, Any] , **_A : Dict ): '''simple docstring''' requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase_ ( cls : List[str] , *_A : str , **_A : List[str] ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', 
'''onnx'''] ) @classmethod def lowercase_ ( cls : Dict , *_A : str , **_A : Any ): '''simple docstring''' requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
299
1
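The boilerplate above is the dummy-object pattern: placeholder classes whose metaclass raises a helpful error whenever an optional backend is missing, so imports never fail up front. A stripped-down imitation; FakeOnnxPipeline is a hypothetical stand-in, not a real diffusers class:

def requires_backends(obj, backends):
    name = getattr(obj, "__name__", obj.__class__.__name__)
    raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")


class DummyObject(type):
    # Any attribute access on the placeholder class itself fails loudly.
    def __getattr__(cls, key):
        requires_backends(cls, cls._backends)


class FakeOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)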
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available UpperCamelCase__ = { '''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ '''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ErnieForCausalLM''', '''ErnieForMaskedLM''', '''ErnieForMultipleChoice''', '''ErnieForNextSentencePrediction''', '''ErnieForPreTraining''', '''ErnieForQuestionAnswering''', '''ErnieForSequenceClassification''', '''ErnieForTokenClassification''', '''ErnieModel''', '''ErniePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
299
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase__ = {'''configuration_mmbt''': ['''MMBTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings'''] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
299
1
'''simple docstring'''

import argparse
import collections
import os
import re
import tempfile

import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
UpperCamelCase__ = '''src/transformers'''

# This is to make sure the transformers module imported is the one in the repo.
UpperCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
UpperCamelCase__ = re.compile(R'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
UpperCamelCase__ = re.compile(R'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
UpperCamelCase__ = re.compile(R'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
UpperCamelCase__ = [
    ('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''),
    ('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''),
    ('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''),
    ('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''),
    ('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''),
    ('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''),
    ('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''),
    ('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''),
    ('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''),
    (
        '''zero-shot-object-detection''',
        '''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''',
        '''AutoModelForZeroShotObjectDetection''',
    ),
    ('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''),
    ('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''),
    ('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''),
    ('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''),
    (
        '''table-question-answering''',
        '''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''',
        '''AutoModelForTableQuestionAnswering''',
    ),
    ('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''),
    ('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''),
    (
        '''next-sentence-prediction''',
        '''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''',
        '''AutoModelForNextSentencePrediction''',
    ),
    (
        '''audio-frame-classification''',
        '''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''',
        '''AutoModelForAudioFrameClassification''',
    ),
    ('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''),
    (
        '''document-question-answering''',
        '''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''',
        '''AutoModelForDocumentQuestionAnswering''',
    ),
    (
        '''visual-question-answering''',
        '''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''',
        '''AutoModelForVisualQuestionAnswering''',
    ),
    ('''image-to-text''', '''MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''),
    (
        '''zero-shot-image-classification''',
        '''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''',
        '''AutoModelForZeroShotImageClassification''',
    ),
    ('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''),
    ('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''),
    ('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''),
]


def a__ ( lowerCAmelCase__ ) -> Optional[int]:
    UpperCAmelCase__ : Union[str, Any] = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , lowerCAmelCase__ )
    return [m.group(0 ) for m in matches]


def a__ ( ) -> List[str]:
    UpperCAmelCase__ : Any = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    UpperCAmelCase__ : Optional[Any] = {
        config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    UpperCAmelCase__ : Optional[int] = collections.defaultdict(lowerCAmelCase__ )
    UpperCAmelCase__ : Optional[Any] = collections.defaultdict(lowerCAmelCase__ )
    UpperCAmelCase__ : List[Any] = collections.defaultdict(lowerCAmelCase__ )

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(lowerCAmelCase__ ):
        UpperCAmelCase__ : List[str] = None
        if _re_tf_models.match(lowerCAmelCase__ ) is not None:
            UpperCAmelCase__ : List[str] = tf_models
            UpperCAmelCase__ : Any = _re_tf_models.match(lowerCAmelCase__ ).groups()[0]
        elif _re_flax_models.match(lowerCAmelCase__ ) is not None:
            UpperCAmelCase__ : List[str] = flax_models
            UpperCAmelCase__ : Tuple = _re_flax_models.match(lowerCAmelCase__ ).groups()[0]
        elif _re_pt_models.match(lowerCAmelCase__ ) is not None:
            UpperCAmelCase__ : str = pt_models
            UpperCAmelCase__ : List[str] = _re_pt_models.match(lowerCAmelCase__ ).groups()[0]

        if lookup_dict is not None:
            while len(lowerCAmelCase__ ) > 0:
                if attr_name in model_prefix_to_model_type:
                    UpperCAmelCase__ : List[str] = True
                    break
                # Try again after removing the last word in the name
                UpperCAmelCase__ : int = ''''''.join(camel_case_split(lowerCAmelCase__ )[:-1] )

    UpperCAmelCase__ : List[Any] = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
    UpperCAmelCase__ : str = list(lowerCAmelCase__ )
    all_models.sort()

    UpperCAmelCase__ : List[Any] = {'''model_type''': all_models}
    UpperCAmelCase__ : Optional[Any] = [pt_models[t] for t in all_models]
    UpperCAmelCase__ : Union[str, Any] = [tf_models[t] for t in all_models]
    UpperCAmelCase__ : Tuple = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    UpperCAmelCase__ : Tuple = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            UpperCAmelCase__ : str = '''AutoProcessor'''
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            UpperCAmelCase__ : List[str] = '''AutoTokenizer'''
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            UpperCAmelCase__ : int = '''AutoFeatureExtractor'''
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            UpperCAmelCase__ : int = '''AutoTokenizer'''

    UpperCAmelCase__ : Dict = [processors[t] for t in all_models]

    return pd.DataFrame(lowerCAmelCase__ )


def a__ ( lowerCAmelCase__ ) -> Union[str, Any]:
    UpperCAmelCase__ : Union[str, Any] = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        UpperCAmelCase__ : List[Any] = [model_mapping, F"""TF_{model_mapping}""", F"""FLAX_{model_mapping}"""]
        UpperCAmelCase__ : Optional[Any] = [auto_class, F"""TF_{auto_class}""", F"""Flax_{auto_class}"""]
        # Loop through all three frameworks
        for module, cls, mapping in zip(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
            # The type of pipeline may not exist in this framework
            if not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
                continue
            # First extract all model_names
            UpperCAmelCase__ : Union[str, Any] = []
            for name in getattr(lowerCAmelCase__ , lowerCAmelCase__ ).values():
                if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
                    model_names.append(lowerCAmelCase__ )
                else:
                    model_names.extend(list(lowerCAmelCase__ ) )
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
    return table


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
    UpperCAmelCase__ : Optional[Any] = get_frameworks_table()
    UpperCAmelCase__ : int = Dataset.from_pandas(lowerCAmelCase__ )

    UpperCAmelCase__ : List[Any] = hf_hub_download(
        '''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=lowerCAmelCase__ )
    UpperCAmelCase__ : Optional[Any] = Dataset.from_json(lowerCAmelCase__ )
    UpperCAmelCase__ : List[str] = {
        tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
        for i in range(len(lowerCAmelCase__ ) )
    }
    UpperCAmelCase__ : Optional[int] = update_pipeline_and_auto_class_table(lowerCAmelCase__ )

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    UpperCAmelCase__ : str = sorted(table.keys() )
    UpperCAmelCase__ : List[Any] = pd.DataFrame(
        {
            '''model_class''': model_classes,
            '''pipeline_tag''': [table[m][0] for m in model_classes],
            '''auto_class''': [table[m][1] for m in model_classes],
        }
    )
    UpperCAmelCase__ : Optional[int] = Dataset.from_pandas(lowerCAmelCase__ )

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(lowerCAmelCase__ , '''frameworks.json''' ) )
        tags_dataset.to_json(os.path.join(lowerCAmelCase__ , '''pipeline_tags.json''' ) )

        if commit_sha is not None:
            UpperCAmelCase__ : List[str] = (
                F"""Update with commit {commit_sha}\n\nSee: """
                F"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
            )
        else:
            UpperCAmelCase__ : Optional[Any] = '''Update'''

        upload_folder(
            repo_id='''huggingface/transformers-metadata''' ,
            folder_path=lowerCAmelCase__ ,
            repo_type='''dataset''' ,
            token=lowerCAmelCase__ ,
            commit_message=lowerCAmelCase__ ,
        )


def a__ ( ) -> Tuple:
    UpperCAmelCase__ : List[Any] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    UpperCAmelCase__ : Union[str, Any] = transformers_module.pipelines.SUPPORTED_TASKS
    UpperCAmelCase__ : str = []
    for key in pipeline_tasks:
        if key not in in_table:
            UpperCAmelCase__ : List[str] = pipeline_tasks[key]['''pt''']
            if isinstance(lowerCAmelCase__ , (list, tuple) ):
                UpperCAmelCase__ : Optional[int] = model[0]
            UpperCAmelCase__ : Optional[Any] = model.__name__
            if model not in in_table.values():
                missing.append(lowerCAmelCase__ )

    if len(lowerCAmelCase__ ) > 0:
        UpperCAmelCase__ : Union[str, Any] = ''', '''.join(lowerCAmelCase__ )
        raise ValueError(
            '''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
            F"""`utils/update_metadata.py`: {msg}. Please add them!"""
        )


if __name__ == "__main__":
    UpperCamelCase__ = argparse.ArgumentParser()
    parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''')
    parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''')
    parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''')
    UpperCamelCase__ = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
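To make the name-splitting step concrete, here is the camel-case regex from the script exercised standalone (`camel_case_split` is the upstream name; the obfuscated listing renames it `a__`). Expected output is shown in the comment:

import re

# Split on lower->UPPER transitions and on ACRONYM->Word boundaries.
_re_camel = re.compile(r'.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')

def camel_case_split(identifier):
    return [m.group(0) for m in _re_camel.finditer(identifier)]

print(camel_case_split('TFBertForQuestionAnswering'))
# ['TF', 'Bert', 'For', 'Question', 'Answering']

The backend-detection loop relies on exactly this behavior: it keeps dropping the last word of an attribute name until the remaining prefix matches a known model type.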
299
'''simple docstring'''

import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


if is_flax_available():
    import jax.numpy as jnp


class lowerCamelCase_ ( __a ):
    def __get__( self : str , _A : Tuple , _A : List[str]=None ):
        '''simple docstring'''
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError('''unreadable attribute''' )
        UpperCAmelCase__ : Union[str, Any] = '''__cached_''' + self.fget.__name__
        UpperCAmelCase__ : Any = getattr(_A , _A , _A )
        if cached is None:
            UpperCAmelCase__ : Dict = self.fget(_A )
            setattr(_A , _A , _A )
        return cached


def a__ ( lowerCAmelCase__ ) -> Optional[int]:
    UpperCAmelCase__ : Tuple = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(F"""invalid truth value {val!r}""" )


def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
    if is_torch_fx_proxy(lowerCAmelCase__ ):
        return True
    if is_torch_available():
        import torch

        if isinstance(lowerCAmelCase__ , torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(lowerCAmelCase__ , tf.Tensor ):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(lowerCAmelCase__ , (jnp.ndarray, Tracer) ):
            return True
    return isinstance(lowerCAmelCase__ , np.ndarray )


def a__ ( lowerCAmelCase__ ) -> Any:
    return isinstance(lowerCAmelCase__ , np.ndarray )


def a__ ( lowerCAmelCase__ ) -> int:
    return _is_numpy(lowerCAmelCase__ )


def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
    import torch

    return isinstance(lowerCAmelCase__ , torch.Tensor )


def a__ ( lowerCAmelCase__ ) -> List[str]:
    return False if not is_torch_available() else _is_torch(lowerCAmelCase__ )


def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
    import torch

    return isinstance(lowerCAmelCase__ , torch.device )


def a__ ( lowerCAmelCase__ ) -> List[str]:
    return False if not is_torch_available() else _is_torch_device(lowerCAmelCase__ )


def a__ ( lowerCAmelCase__ ) -> Any:
    import torch

    if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
        if hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
            UpperCAmelCase__ : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
        else:
            return False
    return isinstance(lowerCAmelCase__ , torch.dtype )


def a__ ( lowerCAmelCase__ ) -> Optional[int]:
    return False if not is_torch_available() else _is_torch_dtype(lowerCAmelCase__ )


def a__ ( lowerCAmelCase__ ) -> List[Any]:
    import tensorflow as tf

    return isinstance(lowerCAmelCase__ , tf.Tensor )


def a__ ( lowerCAmelCase__ ) -> List[str]:
    return False if not is_tf_available() else _is_tensorflow(lowerCAmelCase__ )


def a__ ( lowerCAmelCase__ ) -> Any:
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(lowerCAmelCase__ , '''is_symbolic_tensor''' ):
        return tf.is_symbolic_tensor(lowerCAmelCase__ )
    return type(lowerCAmelCase__ ) == tf.Tensor


def a__ ( lowerCAmelCase__ ) -> Union[str, Any]:
    return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCAmelCase__ )


def a__ ( lowerCAmelCase__ ) -> Tuple:
    import jax.numpy as jnp  # noqa: F811

    return isinstance(lowerCAmelCase__ , jnp.ndarray )


def a__ ( lowerCAmelCase__ ) -> List[Any]:
    return False if not is_flax_available() else _is_jax(lowerCAmelCase__ )


def a__ ( lowerCAmelCase__ ) -> Tuple:
    if isinstance(lowerCAmelCase__ , (dict, UserDict) ):
        return {k: to_py_obj(lowerCAmelCase__ ) for k, v in obj.items()}
    elif isinstance(lowerCAmelCase__ , (list, tuple) ):
        return [to_py_obj(lowerCAmelCase__ ) for o in obj]
    elif is_tf_tensor(lowerCAmelCase__ ):
        return obj.numpy().tolist()
    elif is_torch_tensor(lowerCAmelCase__ ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(lowerCAmelCase__ ):
        return np.asarray(lowerCAmelCase__ ).tolist()
    elif isinstance(lowerCAmelCase__ , (np.ndarray, np.number) ):
        # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def a__ ( lowerCAmelCase__ ) -> Tuple:
    if isinstance(lowerCAmelCase__ , (dict, UserDict) ):
        return {k: to_numpy(lowerCAmelCase__ ) for k, v in obj.items()}
    elif isinstance(lowerCAmelCase__ , (list, tuple) ):
        return np.array(lowerCAmelCase__ )
    elif is_tf_tensor(lowerCAmelCase__ ):
        return obj.numpy()
    elif is_torch_tensor(lowerCAmelCase__ ):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(lowerCAmelCase__ ):
        return np.asarray(lowerCAmelCase__ )
    else:
        return obj


class lowerCamelCase_ ( __a ):
    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        UpperCAmelCase__ : List[str] = fields(self )

        # Safety and consistency checks
        if not len(_A ):
            raise ValueError(f"""{self.__class__.__name__} has no fields.""" )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" )

        UpperCAmelCase__ : Dict = getattr(self , class_fields[0].name )
        UpperCAmelCase__ : Any = all(getattr(self , field.name ) is None for field in class_fields[1:] )

        if other_fields_are_none and not is_tensor(_A ):
            if isinstance(_A , _A ):
                UpperCAmelCase__ : List[Any] = first_field.items()
                UpperCAmelCase__ : Optional[int] = True
            else:
                try:
                    UpperCAmelCase__ : Optional[int] = iter(_A )
                    UpperCAmelCase__ : Optional[int] = True
                except TypeError:
                    UpperCAmelCase__ : Optional[Any] = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(_A ):
                    if (
                        not isinstance(_A , (list, tuple) )
                        or not len(_A ) == 2
                        or not isinstance(element[0] , _A )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            UpperCAmelCase__ : List[Any] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        UpperCAmelCase__ : List[str] = element[1]
            elif first_field is not None:
                UpperCAmelCase__ : Optional[Any] = first_field
        else:
            for field in class_fields:
                UpperCAmelCase__ : Optional[int] = getattr(self , field.name )
                if v is not None:
                    UpperCAmelCase__ : str = v

    def __delitem__( self : Union[str, Any] , *_A : Any , **_A : str ):
        '''simple docstring'''
        raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )

    def lowercase_ ( self : Any , *_A : List[str] , **_A : Tuple ):
        '''simple docstring'''
        raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )

    def lowercase_ ( self : Optional[Any] , *_A : Any , **_A : Tuple ):
        '''simple docstring'''
        raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )

    def lowercase_ ( self : Optional[Any] , *_A : Dict , **_A : List[Any] ):
        '''simple docstring'''
        raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )

    def __getitem__( self : List[str] , _A : Any ):
        '''simple docstring'''
        if isinstance(_A , _A ):
            UpperCAmelCase__ : Union[str, Any] = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__( self : int , _A : Union[str, Any] , _A : str ):
        '''simple docstring'''
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(_A , _A )
        super().__setattr__(_A , _A )

    def __setitem__( self : Any , _A : Optional[int] , _A : List[str] ):
        '''simple docstring'''
        super().__setitem__(_A , _A )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(_A , _A )

    def lowercase_ ( self : Optional[Any] ):
        '''simple docstring'''
        return tuple(self[k] for k in self.keys() )


class lowerCamelCase_ ( __a , __a ):
    @classmethod
    def lowercase_ ( cls : Optional[Any] , _A : Optional[Any] ):
        '''simple docstring'''
        raise ValueError(
            f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" )


class lowerCamelCase_ ( __a ):
    lowerCAmelCase__ = 'longest'
    lowerCAmelCase__ = 'max_length'
    lowerCAmelCase__ = 'do_not_pad'


class lowerCamelCase_ ( __a ):
    lowerCAmelCase__ = 'pt'
    lowerCAmelCase__ = 'tf'
    lowerCAmelCase__ = 'np'
    lowerCAmelCase__ = 'jax'


class lowerCamelCase_ :
    def __init__( self : List[Any] , _A : List[ContextManager] ):
        '''simple docstring'''
        UpperCAmelCase__ : str = context_managers
        UpperCAmelCase__ : int = ExitStack()

    def __enter__( self : str ):
        '''simple docstring'''
        for context_manager in self.context_managers:
            self.stack.enter_context(_A )

    def __exit__( self : Dict , *_A : List[Any] , **_A : str ):
        '''simple docstring'''
        self.stack.__exit__(*_A , **_A )


def a__ ( lowerCAmelCase__ ) -> Any:
    UpperCAmelCase__ : int = infer_framework(lowerCAmelCase__ )
    if framework == "tf":
        UpperCAmelCase__ : Optional[Any] = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        UpperCAmelCase__ : List[Any] = inspect.signature(model_class.forward )  # PyTorch models
    else:
        UpperCAmelCase__ : List[Any] = inspect.signature(model_class.__call__ )  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False


def a__ ( lowerCAmelCase__ ) -> Optional[int]:
    UpperCAmelCase__ : Dict = model_class.__name__
    UpperCAmelCase__ : Union[str, Any] = infer_framework(lowerCAmelCase__ )
    if framework == "tf":
        UpperCAmelCase__ : Tuple = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        UpperCAmelCase__ : List[str] = inspect.signature(model_class.forward )  # PyTorch models
    else:
        UpperCAmelCase__ : int = inspect.signature(model_class.__call__ )  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = "" , lowerCAmelCase__ = "." ) -> Any:
    def _flatten_dict(lowerCAmelCase__ , lowerCAmelCase__="" , lowerCAmelCase__="." ):
        for k, v in d.items():
            UpperCAmelCase__ : int = str(lowerCAmelCase__ ) + delimiter + str(lowerCAmelCase__ ) if parent_key else k
            if v and isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
                yield from flatten_dict(lowerCAmelCase__ , lowerCAmelCase__ , delimiter=lowerCAmelCase__ ).items()
            else:
                yield key, v

    return dict(_flatten_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) )


@contextmanager
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = False ) -> int:
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir


def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ) -> Optional[Any]:
    if is_numpy_array(lowerCAmelCase__ ):
        return np.transpose(lowerCAmelCase__ , axes=lowerCAmelCase__ )
    elif is_torch_tensor(lowerCAmelCase__ ):
        return array.T if axes is None else array.permute(*lowerCAmelCase__ )
    elif is_tf_tensor(lowerCAmelCase__ ):
        import tensorflow as tf

        return tf.transpose(lowerCAmelCase__ , perm=lowerCAmelCase__ )
    elif is_jax_tensor(lowerCAmelCase__ ):
        return jnp.transpose(lowerCAmelCase__ , axes=lowerCAmelCase__ )
    else:
        raise ValueError(F"""Type not supported for transpose: {type(lowerCAmelCase__ )}.""" )


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
    if is_numpy_array(lowerCAmelCase__ ):
        return np.reshape(lowerCAmelCase__ , lowerCAmelCase__ )
    elif is_torch_tensor(lowerCAmelCase__ ):
        return array.reshape(*lowerCAmelCase__ )
    elif is_tf_tensor(lowerCAmelCase__ ):
        import tensorflow as tf

        return tf.reshape(lowerCAmelCase__ , lowerCAmelCase__ )
    elif is_jax_tensor(lowerCAmelCase__ ):
        return jnp.reshape(lowerCAmelCase__ , lowerCAmelCase__ )
    else:
        raise ValueError(F"""Type not supported for reshape: {type(lowerCAmelCase__ )}.""" )


def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ) -> List[Any]:
    if is_numpy_array(lowerCAmelCase__ ):
        return np.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ )
    elif is_torch_tensor(lowerCAmelCase__ ):
        return array.squeeze() if axis is None else array.squeeze(dim=lowerCAmelCase__ )
    elif is_tf_tensor(lowerCAmelCase__ ):
        import tensorflow as tf

        return tf.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ )
    elif is_jax_tensor(lowerCAmelCase__ ):
        return jnp.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ )
    else:
        raise ValueError(F"""Type not supported for squeeze: {type(lowerCAmelCase__ )}.""" )


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
    if is_numpy_array(lowerCAmelCase__ ):
        return np.expand_dims(lowerCAmelCase__ , lowerCAmelCase__ )
    elif is_torch_tensor(lowerCAmelCase__ ):
        return array.unsqueeze(dim=lowerCAmelCase__ )
    elif is_tf_tensor(lowerCAmelCase__ ):
        import tensorflow as tf

        return tf.expand_dims(lowerCAmelCase__ , axis=lowerCAmelCase__ )
    elif is_jax_tensor(lowerCAmelCase__ ):
        return jnp.expand_dims(lowerCAmelCase__ , axis=lowerCAmelCase__ )
    else:
        raise ValueError(F"""Type not supported for expand_dims: {type(lowerCAmelCase__ )}.""" )


def a__ ( lowerCAmelCase__ ) -> int:
    if is_numpy_array(lowerCAmelCase__ ):
        return np.size(lowerCAmelCase__ )
    elif is_torch_tensor(lowerCAmelCase__ ):
        return array.numel()
    elif is_tf_tensor(lowerCAmelCase__ ):
        import tensorflow as tf

        return tf.size(lowerCAmelCase__ )
    elif is_jax_tensor(lowerCAmelCase__ ):
        return array.size
    else:
        raise ValueError(F"""Type not supported for expand_dims: {type(lowerCAmelCase__ )}.""" )


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
    for key, value in auto_map.items():
        if isinstance(lowerCAmelCase__ , (tuple, list) ):
            UpperCAmelCase__ : int = [F"""{repo_id}--{v}""" if (v is not None and '''--''' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            UpperCAmelCase__ : str = F"""{repo_id}--{value}"""
    return auto_map


def a__ ( lowerCAmelCase__ ) -> Tuple:
    for base_class in inspect.getmro(lowerCAmelCase__ ):
        UpperCAmelCase__ : Optional[int] = base_class.__module__
        UpperCAmelCase__ : Optional[int] = base_class.__name__
        if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('''torch''' ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(F"""Could not infer framework from class {model_class}.""" )
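The dict-flattening helper above is easy to check in isolation. A self-contained, simplified equivalent (the upstream name is `flatten_dict`; the listing's obfuscation renames it `a__` and routes through an inner generator):

def flatten_dict(d, parent_key='', delimiter='.'):
    # Recursively join nested keys with the delimiter, mirroring the helper above.
    items = {}
    for k, v in d.items():
        key = str(parent_key) + delimiter + str(k) if parent_key else k
        if v and isinstance(v, dict):
            items.update(flatten_dict(v, key, delimiter))
        else:
            items[key] = v
    return items

print(flatten_dict({'a': 1, 'b': {'c': 2, 'd': {'e': 3}}}))
# {'a': 1, 'b.c': 2, 'b.d.e': 3}

The transpose/reshape/squeeze/expand_dims wrappers in the same file follow one design rule: detect the tensor's framework first, then dispatch to that framework's native op, so callers never have to branch on tensor type themselves.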
299
1
'''simple docstring'''

from pickle import UnpicklingError

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict

from ..utils import logging


UpperCamelCase__ = logging.get_logger(__name__)


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
    try:
        with open(lowerCAmelCase__ , '''rb''' ) as flax_state_f:
            UpperCAmelCase__ : Optional[int] = from_bytes(lowerCAmelCase__ , flax_state_f.read() )
    except UnpicklingError as e:
        try:
            with open(lowerCAmelCase__ ) as f:
                if f.read().startswith('''version''' ):
                    raise OSError(
                        '''You seem to have cloned a repository without having git-lfs installed. Please'''
                        ''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'''
                        ''' folder you cloned.'''
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(F"""Unable to convert {model_file} to Flax deserializable object. """ )

    return load_flax_weights_in_pytorch_model(lowerCAmelCase__ , lowerCAmelCase__ )


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            '''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.'''
        )
        raise

    # check if we have bf16 weights
    UpperCAmelCase__ : Any = flatten_dict(jax.tree_util.tree_map(lambda lowerCAmelCase__ : x.dtype == jnp.bfloataa , lowerCAmelCase__ ) ).values()
    if any(lowerCAmelCase__ ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
            '''before loading those in PyTorch model.'''
        )
        UpperCAmelCase__ : Optional[Any] = jax.tree_util.tree_map(
            lambda lowerCAmelCase__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , lowerCAmelCase__ )

    UpperCAmelCase__ : Dict = ''''''
    UpperCAmelCase__ : Any = flatten_dict(lowerCAmelCase__ , sep='''.''' )
    UpperCAmelCase__ : Union[str, Any] = pt_model.state_dict()

    # keep track of unexpected & missing keys
    UpperCAmelCase__ : Union[str, Any] = []
    UpperCAmelCase__ : Optional[Any] = set(pt_model_dict.keys() )

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        UpperCAmelCase__ : int = flax_key_tuple.split('''.''' )
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            UpperCAmelCase__ : int = flax_key_tuple_array[:-1] + ['''weight''']
            UpperCAmelCase__ : Optional[int] = jnp.transpose(lowerCAmelCase__ , (3, 2, 0, 1) )
        elif flax_key_tuple_array[-1] == "kernel":
            UpperCAmelCase__ : Any = flax_key_tuple_array[:-1] + ['''weight''']
            UpperCAmelCase__ : int = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            UpperCAmelCase__ : Union[str, Any] = flax_key_tuple_array[:-1] + ['''weight''']

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(lowerCAmelCase__ ):
                UpperCAmelCase__ : List[str] = (
                    flax_key_tuple_string.replace('''_0''' , '''.0''' )
                    .replace('''_1''' , '''.1''' )
                    .replace('''_2''' , '''.2''' )
                    .replace('''_3''' , '''.3''' )
                    .replace('''_4''' , '''.4''' )
                    .replace('''_5''' , '''.5''' )
                    .replace('''_6''' , '''.6''' )
                    .replace('''_7''' , '''.7''' )
                    .replace('''_8''' , '''.8''' )
                    .replace('''_9''' , '''.9''' )
                )

        UpperCAmelCase__ : str = '''.'''.join(lowerCAmelCase__ )

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
                    f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."""
                )
            else:
                # add weight to pytorch dict
                UpperCAmelCase__ : Any = np.asarray(lowerCAmelCase__ ) if not isinstance(lowerCAmelCase__ , np.ndarray ) else flax_tensor
                UpperCAmelCase__ : Tuple = torch.from_numpy(lowerCAmelCase__ )
                # remove from missing keys
                missing_keys.remove(lowerCAmelCase__ )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(lowerCAmelCase__ )

    pt_model.load_state_dict(lowerCAmelCase__ )

    # re-transform missing_keys to list
    UpperCAmelCase__ : Dict = list(lowerCAmelCase__ )

    if len(lowerCAmelCase__ ) > 0:
        logger.warning(
            '''Some weights of the Flax model were not used when initializing the PyTorch model'''
            f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
            f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
            ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
            f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
            ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
            ''' FlaxBertForSequenceClassification model).'''
        )
    if len(lowerCAmelCase__ ) > 0:
        logger.warning(
            f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
            f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
            ''' use it for predictions and inference.'''
        )

    return pt_model
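The two weight-layout rules applied in the loop above can be sanity-checked with plain numpy (illustrative shapes only; this is a sketch, not the conversion function):

import numpy as np

# Conv kernels: Flax stores (H, W, in_ch, out_ch); PyTorch expects (out_ch, in_ch, H, W).
flax_conv = np.zeros((3, 3, 16, 32))
pt_conv = np.transpose(flax_conv, (3, 2, 0, 1))
assert pt_conv.shape == (32, 16, 3, 3)

# Dense kernels: Flax stores (in_features, out_features); torch.nn.Linear expects the transpose.
flax_dense = np.zeros((128, 64))
pt_dense = flax_dense.T
assert pt_dense.shape == (64, 128)

Everything else in the loop is bookkeeping: renaming `kernel`/`scale` to `weight`, turning `_0`-style suffixes into `.0` module indices, and tracking missing/unexpected keys for the final warnings.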
299
'''simple docstring'''

import argparse
from typing import List

import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset

# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


UpperCamelCase__ = 1_6
UpperCamelCase__ = 3_2


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 16 ) -> Dict:
    UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    UpperCAmelCase__ : str = DatasetDict(
        {
            '''train''': dataset['''train'''].select(lowerCAmelCase__ ),
            '''validation''': dataset['''train'''].select(lowerCAmelCase__ ),
            '''test''': dataset['''validation'''],
        }
    )

    def tokenize_function(lowerCAmelCase__ ):
        # max_length=None => use the model max length (it's actually the default)
        UpperCAmelCase__ : Optional[int] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        UpperCAmelCase__ : Dict = datasets.map(
            lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    UpperCAmelCase__ : int = tokenized_datasets.rename_column('''label''' , '''labels''' )

    def collate_fn(lowerCAmelCase__ ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        UpperCAmelCase__ : Optional[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            UpperCAmelCase__ : Any = 16
        elif accelerator.mixed_precision != "no":
            UpperCAmelCase__ : Dict = 8
        else:
            UpperCAmelCase__ : List[Any] = None

        return tokenizer.pad(
            lowerCAmelCase__ ,
            padding='''longest''' ,
            max_length=lowerCAmelCase__ ,
            pad_to_multiple_of=lowerCAmelCase__ ,
            return_tensors='''pt''' ,
        )

    # Instantiate dataloaders.
    UpperCAmelCase__ : List[Any] = DataLoader(
        tokenized_datasets['''train'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
    UpperCAmelCase__ : List[str] = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
    UpperCAmelCase__ : List[Any] = DataLoader(
        tokenized_datasets['''test'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )

    return train_dataloader, eval_dataloader, test_dataloader


def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
    # New Code #
    UpperCAmelCase__ : List[str] = []
    # Download the dataset
    UpperCAmelCase__ : Union[str, Any] = load_dataset('''glue''' , '''mrpc''' )
    # Create our splits
    UpperCAmelCase__ : str = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    UpperCAmelCase__ : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    UpperCAmelCase__ : Any = config['''lr''']
    UpperCAmelCase__ : Any = int(config['''num_epochs'''] )
    UpperCAmelCase__ : Any = int(config['''seed'''] )
    UpperCAmelCase__ : Dict = int(config['''batch_size'''] )
    UpperCAmelCase__ : Any = evaluate.load('''glue''' , '''mrpc''' )

    # If the batch size is too big we use gradient accumulation
    UpperCAmelCase__ : Optional[Any] = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        UpperCAmelCase__ : Any = batch_size // MAX_GPU_BATCH_SIZE
        UpperCAmelCase__ : List[Any] = MAX_GPU_BATCH_SIZE

    set_seed(lowerCAmelCase__ )

    # New Code #
    # Create our folds:
    UpperCAmelCase__ : Union[str, Any] = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] )
    UpperCAmelCase__ : Dict = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(lowerCAmelCase__ ):
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = get_fold_dataloaders(
            lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        UpperCAmelCase__ : List[str] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowerCAmelCase__ )

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        UpperCAmelCase__ : Optional[Any] = model.to(accelerator.device )

        # Instantiate optimizer
        UpperCAmelCase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=lowerCAmelCase__ )

        # Instantiate scheduler
        UpperCAmelCase__ : Any = get_linear_schedule_with_warmup(
            optimizer=lowerCAmelCase__ ,
            num_warmup_steps=1_00 ,
            num_training_steps=(len(lowerCAmelCase__ ) * num_epochs) // gradient_accumulation_steps ,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = accelerator.prepare(
            lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )

        # Now we train the model
        for epoch in range(lowerCAmelCase__ ):
            model.train()
            for step, batch in enumerate(lowerCAmelCase__ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                UpperCAmelCase__ : Union[str, Any] = model(**lowerCAmelCase__ )
                UpperCAmelCase__ : Dict = outputs.loss
                UpperCAmelCase__ : Dict = loss / gradient_accumulation_steps
                accelerator.backward(lowerCAmelCase__ )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(lowerCAmelCase__ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    UpperCAmelCase__ : str = model(**lowerCAmelCase__ )
                UpperCAmelCase__ : Any = outputs.logits.argmax(dim=-1 )
                UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
                metric.add_batch(
                    predictions=lowerCAmelCase__ ,
                    references=lowerCAmelCase__ ,
                )

            UpperCAmelCase__ : str = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"""epoch {epoch}:""" , lowerCAmelCase__ )

        # New Code #
        # We also run predictions on the test set at the very end
        UpperCAmelCase__ : int = []
        for step, batch in enumerate(lowerCAmelCase__ ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                UpperCAmelCase__ : str = model(**lowerCAmelCase__ )
            UpperCAmelCase__ : Union[str, Any] = outputs.logits
            UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu() )

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(lowerCAmelCase__ , dim=0 ) )
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    UpperCAmelCase__ : Union[str, Any] = torch.cat(lowerCAmelCase__ , dim=0 )
    UpperCAmelCase__ : Tuple = torch.stack(lowerCAmelCase__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    UpperCAmelCase__ : Optional[Any] = metric.compute(predictions=lowerCAmelCase__ , references=lowerCAmelCase__ )
    accelerator.print('''Average test metrics from all folds:''' , lowerCAmelCase__ )


def a__ ( ) -> Any:
    UpperCAmelCase__ : Tuple = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' ,
        type=lowerCAmelCase__ ,
        default=lowerCAmelCase__ ,
        choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,
        help='''Whether to use mixed precision. Choose'''
        ''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'''
        ''' and an Nvidia Ampere GPU.''' ,
    )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    # New Code #
    parser.add_argument(
        '''--num_folds''' , type=lowerCAmelCase__ , default=3 , help='''The number of splits to perform across the dataset''' )
    UpperCAmelCase__ : Tuple = parser.parse_args()
    UpperCAmelCase__ : Any = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(lowerCAmelCase__ , lowerCAmelCase__ )


if __name__ == "__main__":
    main()
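The fold construction in the script is plain scikit-learn; a standalone illustration with synthetic labels (the script passes the MRPC train labels instead):

import numpy as np
from sklearn.model_selection import StratifiedKFold

labels = np.array([0, 0, 0, 1, 1, 1, 0, 1, 0, 1])
kfold = StratifiedKFold(n_splits=2)
for fold, (train_idxs, valid_idxs) in enumerate(kfold.split(np.zeros(len(labels)), labels)):
    # Each fold preserves the class ratio of `labels` in both index sets.
    print(fold, train_idxs, valid_idxs)

Stratification matters here because MRPC's labels are imbalanced; plain KFold could produce validation folds whose label distribution differs noticeably from the training folds.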
299
1