| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86 to 54.5k) | int64 (0 to 371) | string (lengths 87 to 49.2k) | int64 (0 to 349) | int64 (0 or 1) |
"""simple docstring"""
from math import ceil, sqrt
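# Approach: for every outer square width, count the admissible hole widths.
# A lamina with outer width `outer_width` and hole width `hole_width` uses
# outer_width**2 - hole_width**2 tiles; the hole must share the outer square's
# parity and leave a border of at least one tile on every side.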
def solution(limit: int = 1_000_000) -> int:
    """Return the number of hollow square laminae that can be formed using up to
    `limit` square tiles (Project Euler problem 173)."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(F'''{solution() = }''')
| code_codestyle: 357 |
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
SAMPLE_BPE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')

FRAMEWORK = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test `_convert_token_to_id` and `_convert_id_to_token`."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` as for `slow`, because spm gives
        # back the raw token instead of `unk` in EncodeAsPieces.
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
        # CamemBERT is a French model, so we also use French texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
| style_context_codestyle: 324 | label: 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
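# PriorTransformer denoises CLIP image embeddings conditioned on CLIP text
# embeddings, as used in unCLIP-style pipelines.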
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.")

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0)
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
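    # set_attn_processor mirrors the traversal above: it accepts either a single
    # processor shared by every attention layer or a dict keyed by weight name.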
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes.")

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(additional_embeds, dim=1)

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| code_codestyle: 358 |
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier):
    """Split a CamelCase name into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
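# For example, camel_case_split("TFBertModel") returns ["TF", "Bert", "Model"].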
def get_frameworks_table():
    """Generate a dataframe flagging, for each model type, its supported backends and processor class."""
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_mapping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers objects (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    """Update the table of model class to (pipeline tag, auto class)."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    """Update the metadata files hosted in the huggingface/transformers-metadata dataset."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token)
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    """Check that all pipeline tasks are covered by the `PIPELINE_TAGS_AND_AUTO_MODELS` constant."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| style_context_codestyle: 324 | label: 0 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )
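    # Special-token layout: a single sequence is encoded as `<s> X </s>`,
    # a pair as `<s> A </s> B </s>`.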
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| code_codestyle: 359 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , ),
] , )
def test_dataset_info_dump_and_reload(dataset_info: DatasetInfo, tmp_path):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=4_2 ),
"""v2""": DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict: DatasetInfosDict, tmp_path):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| style_context_codestyle: 324 | label: 0 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| code_codestyle: 360 |
"""simple docstring"""
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| style_context_codestyle: 324 | label: 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
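    # Note: GPT defines no pad token of its own, so the tester points
    # pad_token_id at the last vocab id.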
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)

        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OpenAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477,
            244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| code_codestyle: 361 |
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}")
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}")
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)
def _lowercase ( self : Tuple ) -> List[str]:
if self.config_class.is_composition:
return
_a : str = self.config_class()
self.parent.assertIsNotNone(UpperCAmelCase__ )
def _lowercase ( self : List[Any] ) -> Optional[Any]:
_a : Dict = copy.deepcopy(UpperCAmelCase__ )
_a : Any = self.config_class(**UpperCAmelCase__ )
_a : str = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) )
elif getattr(UpperCAmelCase__ , UpperCAmelCase__ ) != value:
wrong_values.append((key, getattr(UpperCAmelCase__ , UpperCAmelCase__ ), value) )
if len(UpperCAmelCase__ ) > 0:
_a : List[Any] = """\n""".join([f"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
raise ValueError(f"""The following keys were not properly set in the config:\n{errors}""" )
def _lowercase ( self : int ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
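A minimal sketch of how this tester is typically wired into a model's config test, assuming the original transformers names (`ConfigTester`, `run_common_tests`) for the obfuscated definitions above; `MyConfig` is a hypothetical config class.
import unittest

class MyConfigTest(unittest.TestCase):
    def setUp(self):
        # Extra kwargs become the inputs_dict the tester feeds to the config class.
        self.config_tester = ConfigTester(self, config_class=MyConfig, hidden_size=37)  # MyConfig is hypothetical

    def test_config(self):
        self.config_tester.run_common_tests()  # assumed original name of the driver method defined above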
| 324
| 0
|
"""simple docstring"""
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
return "\n".join(
F"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
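A de-obfuscated sketch of the helper above, using the parameter names from the call at the bottom of the snippet:
def multiplication_table(number: int, number_of_terms: int) -> str:
    # One line per term: "number * i = number * i" for i = 1..number_of_terms.
    return "\n".join(f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1))

print(multiplication_table(number=5, number_of_terms=10))  # "5 * 1 = 5" ... "5 * 10 = 50"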
| 362
|
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
_snake_case = HUGGINGFACE_HUB_CACHE
_snake_case = 'config.json'
_snake_case = 'diffusion_pytorch_model.bin'
_snake_case = 'diffusion_flax_model.msgpack'
_snake_case = 'model.onnx'
_snake_case = 'diffusion_pytorch_model.safetensors'
_snake_case = 'weights.pb'
_snake_case = 'https://huggingface.co'
_snake_case = default_cache_path
_snake_case = 'diffusers_modules'
_snake_case = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
_snake_case = ['fp16', 'non-ema']
_snake_case = '.self_attn'
| 324
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase ( snake_case_ , unittest.TestCase ):
UpperCamelCase : List[Any] = BlenderbotSmallTokenizer
UpperCamelCase : Optional[int] = False
def _lowercase ( self : Optional[int] ) -> Optional[int]:
super().setUp()
_a : Dict = ["""__start__""", """adapt""", """act""", """ap@@""", """te""", """__end__""", """__unk__"""]
_a : Any = dict(zip(_a , range(len(_a ) ) ) )
_a : Optional[Any] = ["""#version: 0.2""", """a p""", """t e</w>""", """ap t</w>""", """a d""", """ad apt</w>""", """a c""", """ac t</w>""", """"""]
_a : Optional[int] = {"""unk_token""": """__unk__""", """bos_token""": """__start__""", """eos_token""": """__end__"""}
_a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_a : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def _lowercase ( self : Optional[int] , **UpperCAmelCase__ : Union[str, Any] ) -> int:
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **_a )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : str ) -> List[Any]:
_a : Any = """adapt act apte"""
_a : List[str] = """adapt act apte"""
return input_text, output_text
def _lowercase ( self : int ) -> Optional[Any]:
_a : Any = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_a : List[Any] = """adapt act apte"""
_a : Union[str, Any] = ["""adapt""", """act""", """ap@@""", """te"""]
_a : List[Any] = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_a : Any = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_a : Any = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
def _lowercase ( self : Optional[int] ) -> Tuple:
_a : int = BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
assert tok("""sam""" ).input_ids == [1384]
_a : str = """I am a small frog."""
_a : int = tok([src_text] , padding=_a , truncation=_a )["""input_ids"""]
_a : Optional[int] = tok.batch_decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def _lowercase ( self : str ) -> List[str]:
_a : Dict = BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
_a : Union[str, Any] = """I am a small frog ."""
_a : Dict = """."""
_a : int = tok(_a )["""input_ids"""]
_a : Tuple = tok(_a )["""input_ids"""]
assert encoded[-1] == encoded_dot[0]
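For reference, a sketch of driving the same toy tokenizer outside the test class; "vocab.json" and "merges.txt" are placeholders for the fixture files written in setUp above.
tokenizer = BlenderbotSmallTokenizer(
    vocab_file="vocab.json",    # placeholder path
    merges_file="merges.txt",   # placeholder path
    unk_token="__unk__", bos_token="__start__", eos_token="__end__",
)
print(tokenizer.tokenize("adapt act apte"))  # ['adapt', 'act', 'ap@@', 'te']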
| 363
|
"""simple docstring"""
from math import factorial
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if successes > trials:
raise ValueError("""successes must be lower or equal to trials""" )
if trials < 0 or successes < 0:
raise ValueError("""the function is defined for non-negative integers""" )
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError("""the function is defined for non-negative integers""" )
if not 0 < prob < 1:
raise ValueError("""prob has to be in range of 1 - 0""" )
_a : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
_a : Optional[int] = float(factorial(UpperCamelCase__ ) )
coefficient /= factorial(UpperCamelCase__ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('Probability of 2 successes out of 4 trails')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
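The same computation with math.comb, as a clearer sketch of the formula P(k; n, p) = C(n, k) * p**k * (1 - p)**(n - k):
from math import comb

def binomial_probability(successes: int, trials: int, prob: float) -> float:
    # comb(n, k) replaces the manual factorial ratio computed above.
    return comb(trials, successes) * prob**successes * (1 - prob) ** (trials - successes)

print(binomial_probability(2, 4, 0.75))  # 0.2109375, matching the function above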
| 324
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE__ ):
UpperCamelCase : List[Any] = 'mgp-str'
def __init__( self : List[Any] , UpperCAmelCase__ : Optional[int]=[32, 128] , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : Optional[int]=27 , UpperCAmelCase__ : int=38 , UpperCAmelCase__ : Tuple=50257 , UpperCAmelCase__ : str=30522 , UpperCAmelCase__ : List[Any]=768 , UpperCAmelCase__ : Union[str, Any]=12 , UpperCAmelCase__ : Union[str, Any]=12 , UpperCAmelCase__ : List[str]=4.0 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : str=1E-5 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Tuple=0.0_2 , **UpperCAmelCase__ : str , ) -> int:
super().__init__(**_SCREAMING_SNAKE_CASE )
_a : List[Any] = image_size
_a : str = patch_size
_a : List[Any] = num_channels
_a : str = max_token_length
_a : str = num_character_labels
_a : Optional[int] = num_bpe_labels
_a : Union[str, Any] = num_wordpiece_labels
_a : List[str] = hidden_size
_a : Any = num_hidden_layers
_a : Dict = num_attention_heads
_a : str = mlp_ratio
_a : List[Any] = distilled
_a : int = layer_norm_eps
_a : str = drop_rate
_a : Dict = qkv_bias
_a : int = attn_drop_rate
_a : Union[str, Any] = drop_path_rate
_a : Union[str, Any] = output_aa_attentions
_a : Tuple = initializer_range
| 364
|
"""simple docstring"""
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a , _a : Dict = len(UpperCamelCase__ ), len(grid[0] )
if (
min(UpperCamelCase__ , UpperCamelCase__ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
_a : Any = 0
count += depth_first_search(UpperCamelCase__ , row + 1 , UpperCamelCase__ , UpperCamelCase__ )
count += depth_first_search(UpperCamelCase__ , row - 1 , UpperCamelCase__ , UpperCamelCase__ )
count += depth_first_search(UpperCamelCase__ , UpperCamelCase__ , col + 1 , UpperCamelCase__ )
count += depth_first_search(UpperCamelCase__ , UpperCamelCase__ , col - 1 , UpperCamelCase__ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
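A usage sketch; judging from the recursive calls, the de-obfuscated signature is depth_first_search(grid, row, col, visit), counting simple paths from the top-left to the bottom-right cell, with 1-cells as walls:
grid = [
    [0, 0, 0],
    [0, 1, 0],
    [0, 0, 0],
]
print(depth_first_search(grid, 0, 0, set()))  # 2 simple paths around the blocked centre cell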
| 324
| 0
|
"""simple docstring"""
import math
class UpperCamelCase :
def __init__( self : Any , UpperCAmelCase__ : Optional[int]=0 ) -> str: # a graph with Node 0,1,...,N-1
_a : Optional[int] = n
_a : Optional[int] = [
[math.inf for j in range(0 , UpperCAmelCase_ )] for i in range(0 , UpperCAmelCase_ )
] # adjacency matrix for weight
_a : str = [
[math.inf for j in range(0 , UpperCAmelCase_ )] for i in range(0 , UpperCAmelCase_ )
] # dp[i][j] stores minimum distance from i to j
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str ) -> Any:
_a : str = w
def _lowercase ( self : Union[str, Any] ) -> Dict:
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
_a : int = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def _lowercase ( self : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] ) -> Dict:
return self.dp[u][v]
if __name__ == "__main__":
_snake_case = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
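For clarity, a compact sketch of the relaxation at the heart of the class above; note the standard algorithm also starts from dist[i][i] = 0, which the obfuscated __init__ appears to omit after filling everything with inf.
def floyd_warshall(dist: list[list[float]]) -> list[list[float]]:
    # dist is an n x n matrix of edge weights (math.inf where no edge exists,
    # 0 on the diagonal). Afterwards dist[i][j] is the shortest i -> j distance.
    n = len(dist)
    for k in range(n):
        for i in range(n):
            for j in range(n):
                # A route i -> k -> j may beat the best i -> j found so far.
                dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
    return dist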
| 365
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_snake_case = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 324
| 0
|
from sklearn.metrics import recall_score
import datasets
_snake_case = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_snake_case = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
_snake_case = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def _lowercase ( self : Optional[int] ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int=None , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : Optional[int]="binary" , UpperCAmelCase__ : int=None , UpperCAmelCase__ : List[str]="warn" , ) -> Optional[int]:
_a : List[Any] = recall_score(
lowerCAmelCase__ , lowerCAmelCase__ , labels=lowerCAmelCase__ , pos_label=lowerCAmelCase__ , average=lowerCAmelCase__ , sample_weight=lowerCAmelCase__ , zero_division=lowerCAmelCase__ , )
return {"recall": float(lowerCAmelCase__ ) if score.size == 1 else score}
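Example 1 from the docstring above, run directly against sklearn (which is what this metric wraps):
from sklearn.metrics import recall_score

references = [0, 0, 1, 1, 1]
predictions = [0, 1, 0, 1, 1]
# TP = 2 (the last two positives are caught), FN = 1, so recall = 2 / (2 + 1).
print(recall_score(references, predictions))  # 0.6666666666666666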
| 366
|
"""simple docstring"""
from __future__ import annotations
import time
_snake_case = list[tuple[int, int]]
_snake_case = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_snake_case = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class UpperCamelCase :
def __init__( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Node | None ) -> List[str]:
_a : int = pos_x
_a : Union[str, Any] = pos_y
_a : Tuple = (pos_y, pos_x)
_a : Tuple = goal_x
_a : int = goal_y
_a : str = parent
class UpperCamelCase :
def __init__( self : List[Any] , UpperCAmelCase__ : tuple[int, int] , UpperCAmelCase__ : tuple[int, int] ) -> List[str]:
_a : List[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , UpperCAmelCase__ )
_a : List[str] = Node(goal[1] , goal[0] , goal[1] , goal[0] , UpperCAmelCase__ )
_a : Optional[int] = [self.start]
_a : Tuple = False
def _lowercase ( self : str ) -> Path | None:
while self.node_queue:
_a : Tuple = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
_a : Dict = True
return self.retrace_path(UpperCAmelCase__ )
_a : Tuple = self.get_successors(UpperCAmelCase__ )
for node in successors:
self.node_queue.append(UpperCAmelCase__ )
if not self.reached:
return [self.start.pos]
return None
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Node ) -> list[Node]:
_a : Optional[Any] = []
for action in delta:
_a : str = parent.pos_x + action[1]
_a : List[Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(UpperCAmelCase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(UpperCAmelCase__ , UpperCAmelCase__ , self.target.pos_y , self.target.pos_x , UpperCAmelCase__ ) )
return successors
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Node | None ) -> Path:
_a : Dict = node
_a : List[str] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_a : Any = current_node.parent
path.reverse()
return path
class UpperCamelCase :
def __init__( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] ) -> Any:
_a : Dict = BreadthFirstSearch(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Optional[int] = BreadthFirstSearch(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Dict = False
def _lowercase ( self : Any ) -> Path | None:
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
_a : List[Any] = self.fwd_bfs.node_queue.pop(0 )
_a : Union[str, Any] = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
_a : Optional[int] = True
return self.retrace_bidirectional_path(
UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[str] = current_bwd_node
_a : int = current_fwd_node
_a : Optional[Any] = {
self.fwd_bfs: self.fwd_bfs.get_successors(UpperCAmelCase__ ),
self.bwd_bfs: self.bwd_bfs.get_successors(UpperCAmelCase__ ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(UpperCAmelCase__ )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Node , UpperCAmelCase__ : Node ) -> Path:
_a : str = self.fwd_bfs.retrace_path(UpperCAmelCase__ )
_a : List[Any] = self.bwd_bfs.retrace_path(UpperCAmelCase__ )
bwd_path.pop()
bwd_path.reverse()
_a : Tuple = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
_snake_case = (0, 0)
_snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_snake_case = time.time()
_snake_case = BreadthFirstSearch(init, goal)
_snake_case = bfs.search()
_snake_case = time.time() - start_bfs_time
print('Unidirectional BFS computation time : ', bfs_time)
_snake_case = time.time()
_snake_case = BidirectionalBreadthFirstSearch(init, goal)
_snake_case = bd_bfs.search()
_snake_case = time.time() - start_bd_bfs_time
print('Bidirectional BFS computation time : ', bd_bfs_time)
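A compact reference BFS over the same grid/delta conventions, as a sketch of what the forward search above computes (the path is a list of (y, x) positions):
from collections import deque

def bfs_path(grid, start, goal):
    # Plain BFS returning the first (shortest) path found, or None if unreachable.
    queue = deque([(start, [start])])
    seen = {start}
    while queue:
        (y, x), path = queue.popleft()
        if (y, x) == goal:
            return path
        for dy, dx in delta:  # the four moves defined above
            ny, nx = y + dy, x + dx
            if (
                0 <= ny < len(grid)
                and 0 <= nx < len(grid[0])
                and grid[ny][nx] == 0
                and (ny, nx) not in seen
            ):
                seen.add((ny, nx))
                queue.append(((ny, nx), path + [(ny, nx)]))
    return None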
| 324
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class UpperCamelCase ( UpperCamelCase__ ):
UpperCamelCase : Union[str, Any] = '''funnel'''
UpperCamelCase : int = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
}
def __init__( self : Tuple , UpperCAmelCase__ : Union[str, Any]=30522 , UpperCAmelCase__ : Optional[int]=[4, 4, 4] , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : Dict=768 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : Optional[int]=64 , UpperCAmelCase__ : Tuple=3072 , UpperCAmelCase__ : int="gelu_new" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Union[str, Any]=1E-9 , UpperCAmelCase__ : Any="mean" , UpperCAmelCase__ : Optional[int]="relative_shift" , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Dict=True , **UpperCAmelCase__ : Union[str, Any] , ) -> str:
_a : List[Any] = vocab_size
_a : int = block_sizes
_a : Dict = [1] * len(__lowerCamelCase ) if block_repeats is None else block_repeats
assert len(__lowerCamelCase ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
_a : Tuple = num_decoder_layers
_a : Dict = d_model
_a : Tuple = n_head
_a : int = d_head
_a : List[Any] = d_inner
_a : str = hidden_act
_a : Dict = hidden_dropout
_a : Optional[int] = attention_dropout
_a : List[Any] = activation_dropout
_a : str = initializer_range
_a : Optional[Any] = initializer_std
_a : Tuple = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
_a : str = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
_a : str = attention_type
_a : Union[str, Any] = separate_cls
_a : Union[str, Any] = truncate_seq
_a : List[Any] = pool_q_only
super().__init__(**__lowerCamelCase )
@property
def _lowercase ( self : str ) -> str:
return sum(self.block_sizes )
@num_hidden_layers.setter
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : str ) -> Optional[Any]:
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.""" )
@property
def _lowercase ( self : Any ) -> Tuple:
return len(self.block_sizes )
@num_blocks.setter
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : int ) -> str:
raise NotImplementedError("""This model does not support the setting of `num_blocks`. Please set `block_sizes`.""" )
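Illustration of the derived properties above: the layer count is the sum of `block_sizes` and the block count is its length, which is why both setters are disabled.
block_sizes = [4, 4, 4]  # the default from the signature above
print(sum(block_sizes))  # num_hidden_layers -> 12
print(len(block_sizes))  # num_blocks        -> 3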
| 367
|
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_snake_case = logging.getLogger(__name__)
_snake_case = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_snake_case = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCamelCase :
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
''' scratch.'''
)
} , )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case_ )} , )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class UpperCamelCase :
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={
'''help''': (
'''The input training data files (multiple files in glob format). '''
'''Very often splitting large files to smaller files can prevent tokenizer going out of memory'''
)
} , )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , )
UpperCamelCase : bool = field(
default=snake_case_ , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , )
UpperCamelCase : bool = field(
default=snake_case_ , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} )
UpperCamelCase : bool = field(default=snake_case_ , metadata={'''help''': '''Whether or not to use whole word mask.'''} )
UpperCamelCase : float = field(
default=0.1_5 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
UpperCamelCase : float = field(
default=1 / 6 , metadata={
'''help''': (
'''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
''' modeling.'''
)
} , )
UpperCamelCase : int = field(
default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} )
UpperCamelCase : int = field(
default=-1 , metadata={
'''help''': (
'''Optional input sequence length after tokenization.'''
'''The training dataset will be truncated in block of this size for training.'''
'''Default to the model max input length for single sentence inputs (take into account special tokens).'''
)
} , )
UpperCamelCase : bool = field(
default=snake_case_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = None , ):
'''simple docstring'''
def _dataset(UpperCamelCase__ , UpperCamelCase__=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" )
return LineByLineWithRefDataset(
tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size , ref_path=UpperCamelCase__ , )
return LineByLineTextDataset(tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=UpperCamelCase__ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(UpperCamelCase__ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def lowerCAmelCase__ ( ):
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_a : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_a , _a , _a : List[str] = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , UpperCamelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
_a : str = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_a : Union[str, Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
_a : str = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.tokenizer_name:
_a : List[str] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_a : Union[str, Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
""" script, save it,and load it from here, using --tokenizer_name""" )
if model_args.model_name_or_path:
_a : Optional[Any] = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , )
else:
logger.info("""Training new model from scratch""" )
_a : List[Any] = AutoModelWithLMHead.from_config(UpperCamelCase__ )
model.resize_token_embeddings(len(UpperCamelCase__ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
if data_args.block_size <= 0:
_a : int = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
_a : Optional[Any] = min(data_args.block_size , tokenizer.max_len )
# Get datasets
_a : Optional[Any] = (
get_dataset(UpperCamelCase__ , tokenizer=UpperCamelCase__ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
_a : Optional[int] = (
get_dataset(UpperCamelCase__ , tokenizer=UpperCamelCase__ , evaluate=UpperCamelCase__ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
_a : Any = DataCollatorForPermutationLanguageModeling(
tokenizer=UpperCamelCase__ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
_a : Union[str, Any] = DataCollatorForWholeWordMask(
tokenizer=UpperCamelCase__ , mlm_probability=data_args.mlm_probability )
else:
_a : str = DataCollatorForLanguageModeling(
tokenizer=UpperCamelCase__ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_a : Union[str, Any] = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , data_collator=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , prediction_loss_only=UpperCamelCase__ , )
# Training
if training_args.do_train:
_a : Optional[Any] = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=UpperCamelCase__ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_a : Union[str, Any] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_a : int = trainer.evaluate()
_a : Dict = math.exp(eval_output["""eval_loss"""] )
_a : Union[str, Any] = {"""perplexity""": perplexity}
_a : Optional[Any] = os.path.join(training_args.output_dir , """eval_results_lm.txt""" )
if trainer.is_world_master():
with open(UpperCamelCase__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , UpperCamelCase__ , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(UpperCamelCase__ )
return results
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
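A hedged invocation sketch; the flag names follow the dataclass fields above, since HfArgumentParser maps `train_data_file` to `--train_data_file` and so on (the script name and file paths are illustrative only):
# python run_language_modeling.py \
#     --model_name_or_path bert-base-uncased \
#     --train_data_file train.txt \
#     --eval_data_file eval.txt \
#     --mlm \
#     --do_train --do_eval \
#     --output_dir ./out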
| 324
| 0
|
"""simple docstring"""
import os
import string
import sys
_snake_case = 1 << 8
_snake_case = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
_snake_case = KEYMAP['up']
_snake_case = KEYMAP['left']
if sys.platform == "win32":
_snake_case = []
_snake_case = {
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
_snake_case = ord(str(i))
def lowerCAmelCase__ ( ):
'''simple docstring'''
if os.name == "nt":
import msvcrt
_a : Optional[Any] = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(__lowerCamelCase ) == 0:
# Read the keystroke
_a : Optional[Any] = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
_a : Tuple = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
_a : Optional[int] = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(__lowerCamelCase )
if ord(__lowerCamelCase ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(1_2_6 ) )
_a : str = chr(KEYMAP["""esc"""] )
except KeyError:
_a : Tuple = cha[1]
else:
_a : List[str] = ch.decode(__lowerCamelCase )
else:
_a : Dict = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
_a : Any = sys.stdin.fileno()
_a : Optional[Any] = termios.tcgetattr(__lowerCamelCase )
try:
tty.setraw(__lowerCamelCase )
_a : str = sys.stdin.read(1 )
finally:
termios.tcsetattr(__lowerCamelCase , termios.TCSADRAIN , __lowerCamelCase )
return ch
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : List[Any] = get_raw_chars()
if ord(__lowerCamelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(__lowerCamelCase ) == KEYMAP["esc"]:
_a : str = get_raw_chars()
if ord(__lowerCamelCase ) == KEYMAP["mod_int"]:
_a : Optional[Any] = get_raw_chars()
if ord(__lowerCamelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__lowerCamelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(__lowerCamelCase ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
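A usage sketch for the helpers above; only `get_raw_chars` appears by name in the snippet, so `get_character` is assumed from the original accelerate source as the name of the second function.
# Poll single keypresses until Enter or Ctrl-C is pressed.
while True:
    key = get_character()
    if key in (chr(KEYMAP["newline"]), chr(KEYMAP["interrupt"])):
        break
    print(repr(key))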
| 368
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_snake_case = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
_a : Optional[Any] = k.replace(UpperCamelCase__ , UpperCamelCase__ )
return k
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Union[str, Any] = DEFAULTS.copy()
cfg_kwargs.update(UpperCamelCase__ )
_a : Optional[Any] = PegasusConfig(**UpperCamelCase__ )
_a : Tuple = PegasusForConditionalGeneration(UpperCamelCase__ )
_a : str = torch_model.model.state_dict()
_a : Union[str, Any] = {}
for k, v in tf_weights.items():
_a : Any = rename_state_dict_key(UpperCamelCase__ )
if new_k not in sd:
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
_a : str = v.T
_a : int = torch.tensor(UpperCamelCase__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
_a : Union[str, Any] = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
_a : str = mapping["""shared.weight"""]
_a : Union[str, Any] = mapping["""shared.weight"""]
_a : Optional[Any] = {k: torch.zeros_like(UpperCamelCase__ ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**UpperCamelCase__ )
_a , _a : int = torch_model.model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
_a : Optional[Any] = [
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], F"""no matches found for the following tf keys {extra}"""
return torch_model
def lowerCAmelCase__ ( UpperCamelCase__="./ckpt/aeslc/model.ckpt-32000" ):
'''simple docstring'''
_a : List[Any] = tf.train.list_variables(UpperCamelCase__ )
_a : Optional[int] = {}
_a : Dict = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(UpperCamelCase__ , desc="""converting tf checkpoint to dict""" ):
_a : Optional[Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
_a : str = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
_a : int = array
return tf_weights
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
# save tokenizer first
_a : Dict = Path(UpperCamelCase__ ).parent.name
_a : Optional[Any] = task_specific_params[F"""summarization_{dataset}"""]["""max_position_embeddings"""]
_a : Tuple = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=UpperCamelCase__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(UpperCamelCase__ )
# convert model
_a : List[Any] = get_tf_weights_as_numpy(UpperCamelCase__ )
_a : Dict = task_specific_params[F"""summarization_{dataset}"""]
if dataset == "large":
_a : Tuple = task_specific_params
_a : Optional[int] = convert_pegasus(UpperCamelCase__ , UpperCamelCase__ )
torch_model.save_pretrained(UpperCamelCase__ )
_a : Dict = torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(UpperCamelCase__ , Path(UpperCamelCase__ ) / """pytorch_model.bin""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
_snake_case = parser.parse_args()
if args.save_dir is None:
_snake_case = Path(args.tf_ckpt_path).parent.name
_snake_case = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
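A quick check of the PATTERNS-based renaming (the first function in this file is called as `rename_state_dict_key` above); the sample TF-style key is illustrative only:
sample_key = "encoder/ffn.dense.kernel"   # hypothetical TF checkpoint key
print(rename_state_dict_key(sample_key))  # encoder.fc1.weight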
| 324
| 0
|
"""simple docstring"""
import random
from typing import Any
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
for _ in range(len(_snake_case ) ):
_a : Any = random.randint(0 , len(_snake_case ) - 1 )
_a : Optional[Any] = random.randint(0 , len(_snake_case ) - 1 )
_a , _a : Optional[int] = data[b], data[a]  # swap of data[a] and data[b] in the original source
return data
if __name__ == "__main__":
_snake_case = [0, 1, 2, 3, 4, 5, 6, 7]
_snake_case = ["python", "says", "hello", "!"]
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
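The loop above swaps two random positions per pass; the canonical Fisher-Yates walks the list from the end and swaps each slot with a random earlier index, which guarantees a uniform permutation. A sketch:
import random

def fisher_yates(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # 0 <= j <= i
        data[i], data[j] = data[j], data[i]
    return data

print(fisher_yates([0, 1, 2, 3, 4, 5, 6, 7]))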
| 369
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
UpperCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline
UpperCamelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
UpperCamelCase : Tuple = PipelineTesterMixin.required_optional_params - {'''latents'''}
UpperCamelCase : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowercase ( self : Any ) -> List[Any]:
torch.manual_seed(0 )
_a : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase__ , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
_a : Union[str, Any] = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
_a : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_a : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=32 , )
_a : Tuple = CLIPTextModel(UpperCAmelCase__ )
_a : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=UpperCAmelCase__ )
_a : Dict = CLIPTextModelWithProjection(UpperCAmelCase__ )
_a : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=UpperCAmelCase__ )
_a : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=0 ) -> int:
_a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
_a : Any = image / 2 + 0.5
if str(UpperCAmelCase__ ).startswith("""mps""" ):
_a : Any = torch.manual_seed(UpperCAmelCase__ )
else:
_a : Tuple = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
_a : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.7_5,
}
return inputs
def _lowercase ( self : Any ) -> List[Any]:
_a : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_a : Dict = self.get_dummy_components()
_a : List[Any] = StableDiffusionXLImgaImgPipeline(**UpperCAmelCase__ )
_a : Union[str, Any] = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : List[str] = self.get_dummy_inputs(UpperCAmelCase__ )
_a : List[str] = sd_pipe(**UpperCAmelCase__ ).images
_a : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a : List[str] = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase ( self : Any ) -> Any:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def _lowercase ( self : List[Any] ) -> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def _lowercase ( self : Any ) -> Any:
pass
def _lowercase ( self : Tuple ) -> Union[str, Any]:
_a : int = self.get_dummy_components()
_a : Any = StableDiffusionXLImgaImgPipeline(**UpperCAmelCase__ )
_a : Dict = sd_pipe.to(UpperCAmelCase__ )
_a : List[str] = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
# forward without prompt embeds
_a : int = self.get_dummy_inputs(UpperCAmelCase__ )
_a : List[str] = 3 * ["""this is a negative prompt"""]
_a : Dict = negative_prompt
_a : Dict = 3 * [inputs["""prompt"""]]
_a : Optional[Any] = sd_pipe(**UpperCAmelCase__ )
_a : Tuple = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
_a : int = self.get_dummy_inputs(UpperCAmelCase__ )
_a : Union[str, Any] = 3 * ["""this is a negative prompt"""]
_a : int = 3 * [inputs.pop("""prompt""" )]
_a , _a , _a , _a : List[str] = sd_pipe.encode_prompt(UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ )
_a : Tuple = sd_pipe(
**UpperCAmelCase__ , prompt_embeds=UpperCAmelCase__ , negative_prompt_embeds=UpperCAmelCase__ , pooled_prompt_embeds=UpperCAmelCase__ , negative_pooled_prompt_embeds=UpperCAmelCase__ , )
_a : Dict = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : List[str] ) -> Union[str, Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : str="cpu" , UpperCAmelCase__ : str=torch.floataa , UpperCAmelCase__ : List[Any]=0 ) -> List[str]:
_a : List[str] = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
_a : Union[str, Any] = np.random.RandomState(UpperCAmelCase__ ).standard_normal((1, 4, 64, 64) )
_a : List[Any] = torch.from_numpy(UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
_a : Any = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _lowercase ( self : int ) -> Union[str, Any]:
_a : Union[str, Any] = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : List[str] = self.get_inputs(UpperCAmelCase__ )
_a : Tuple = pipe(**UpperCAmelCase__ ).images
_a : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_a : int = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
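The seeding pattern used throughout these tests, pulled out as a sketch: MPS cannot host a device-local Generator, so seeding falls back to the global RNG.
import torch

def make_generator(device, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # global RNG on MPS
    return torch.Generator(device=device).manual_seed(seed)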
| 324
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
_snake_case = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
_snake_case = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
_snake_case = '▁'
class UpperCamelCase ( _a ):
UpperCamelCase : Tuple = VOCAB_FILES_NAMES
UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : List[Any] = AlbertTokenizer
def __init__( self : List[Any] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : str=False , UpperCAmelCase__ : List[Any]="[CLS]" , UpperCAmelCase__ : int="[SEP]" , UpperCAmelCase__ : Dict="<unk>" , UpperCAmelCase__ : int="[SEP]" , UpperCAmelCase__ : List[Any]="<pad>" , UpperCAmelCase__ : Tuple="[CLS]" , UpperCAmelCase__ : int="[MASK]" , **UpperCAmelCase__ : Dict , ) -> Optional[int]:
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
_a : List[Any] = (
AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a )
if isinstance(_a , _a )
else mask_token
)
super().__init__(
_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , )
_a : str = do_lower_case
_a : int = remove_space
_a : Union[str, Any] = keep_accents
_a : Any = vocab_file
_a : Optional[Any] = False if not self.vocab_file else True
def _lowercase ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> int:
_a : Union[str, Any] = [self.sep_token_id]
_a : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
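        # Resulting layouts (ALBERT-style single separators; the second branch
        # handles a sequence pair):
        #   single sequence: [CLS] A [SEP]
        #   pair of sequences: [CLS] A [SEP] B [SEP]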
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> Tuple:
_a : str = [self.sep_token_id]
_a : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> List[Any]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : str = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
| 370
|
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger()
@dataclass
class UpperCamelCase :
UpperCamelCase : nn.Module
UpperCamelCase : List[nn.Module] = field(default_factory=snake_case_ )
UpperCamelCase : list = field(default_factory=snake_case_ )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tensor , UpperCAmelCase__ : Tensor ) -> Any:
_a : int = len(list(m.modules() ) ) == 1 or isinstance(UpperCAmelCase__ , nn.Convad ) or isinstance(UpperCAmelCase__ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(UpperCAmelCase__ )
def __call__( self : Tuple , UpperCAmelCase__ : Tensor ) -> Tuple:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(UpperCAmelCase__ )
[x.remove() for x in self.handles]
return self
@property
def _lowercase ( self : Optional[int] ) -> int:
        # keep only traced modules whose state_dict is non-empty, i.e. those holding learnable params
return list(filter(lambda UpperCAmelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class UpperCamelCase :
UpperCamelCase : nn.Module
UpperCamelCase : nn.Module
UpperCamelCase : int = 0
UpperCamelCase : List = field(default_factory=snake_case_ )
UpperCamelCase : List = field(default_factory=snake_case_ )
def __call__( self : Optional[Any] , UpperCAmelCase__ : Tensor ) -> Tuple:
_a : Union[str, Any] = Tracker(self.dest )(UpperCAmelCase__ ).parametrized
_a : List[Any] = Tracker(self.src )(UpperCAmelCase__ ).parametrized
_a : Tuple = list(filter(lambda UpperCAmelCase__ : type(UpperCAmelCase__ ) not in self.src_skip , UpperCAmelCase__ ) )
_a : Union[str, Any] = list(filter(lambda UpperCAmelCase__ : type(UpperCAmelCase__ ) not in self.dest_skip , UpperCAmelCase__ ) )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise Exception(
f"""Numbers of operations are different. Source module has {len(UpperCAmelCase__ )} operations while"""
f""" destination module has {len(UpperCAmelCase__ )}.""" )
for dest_m, src_m in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
 print(f"""Transferred from={src_m} to={dest_m}""" )
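# ModuleTransfer works positionally: both models are traced with the same dummy
# input, parameter-holding leaf modules are collected in execution order, and
# their state dicts are copied pairwise. A minimal standalone sketch (the
# destination model below is a placeholder, not taken from this script):
#
#   src = timm.create_model("resnet50", pretrained=True)
#   dest = ResNetForImageClassification(ResNetConfig())
#   ModuleTransfer(src=src, dest=dest)(torch.randn(1, 3, 224, 224))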
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = True ):
'''simple docstring'''
print(F"""Converting {name}...""" )
with torch.no_grad():
_a : List[str] = timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ ).eval()
_a : str = ResNetForImageClassification(UpperCamelCase__ ).eval()
_a : List[str] = ModuleTransfer(src=UpperCamelCase__ , dest=UpperCamelCase__ )
_a : List[str] = torch.randn((1, 3, 2_2_4, 2_2_4) )
module_transfer(UpperCamelCase__ )
assert torch.allclose(from_model(UpperCamelCase__ ) , our_model(UpperCamelCase__ ).logits ), "The model logits don't match the original one."
_a : Dict = F"""resnet{'-'.join(name.split('resnet' ) )}"""
print(UpperCamelCase__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add model""" , use_temp_dir=UpperCamelCase__ , )
 # we can reuse the ConvNeXt image processor here
_a : Optional[Any] = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add image processor""" , use_temp_dir=UpperCamelCase__ , )
print(F"""Pushed {checkpoint_name}""" )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True ):
'''simple docstring'''
_a : Any = """imagenet-1k-id2label.json"""
_a : Optional[int] = 1_0_0_0
_a : Any = (1, num_labels)
_a : Union[str, Any] = """huggingface/label-files"""
_a : Tuple = num_labels
_a : Optional[int] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
_a : Optional[Any] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_a : Any = idalabel
_a : Tuple = {v: k for k, v in idalabel.items()}
_a : List[str] = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ )
_a : Union[str, Any] = {
"""resnet18""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type="""basic""" ),
"""resnet26""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
"""resnet34""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type="""basic""" ),
"""resnet50""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
"""resnet101""": ImageNetPreTrainedConfig(
depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
"""resnet152""": ImageNetPreTrainedConfig(
depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
}
if model_name:
convert_weight_and_push(UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return config, expected_shape
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
 ' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
_snake_case = parser.parse_args()
_snake_case = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
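    # An illustrative invocation (the script filename below is a placeholder).
    # Note that argparse's `type=bool` treats any non-empty string as True, so
    # `--push_to_hub False` would still evaluate to True; only the default can
    # effectively control the push from the command line here.
    #
    #   python convert_resnet_weights.py \
    #       --model_name resnet50 \
    #       --pytorch_dump_folder_path ./converted-resnet50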
| 324
| 0
|
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class UpperCamelCase :
def __init__( self : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict=13 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : Tuple=6 , UpperCAmelCase__ : Optional[int]=17 , UpperCAmelCase__ : Tuple=23 , UpperCAmelCase__ : int=11 , UpperCAmelCase__ : Optional[int]=True , ) -> List[str]:
_a : int = parent
_a : List[Any] = batch_size
_a : Dict = seq_length
_a : Dict = act_dim
_a : int = state_dim
_a : Dict = hidden_size
_a : List[str] = max_length
_a : Union[str, Any] = is_training
def _lowercase ( self : Dict ) -> Any:
_a : List[str] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
_a : Optional[int] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
_a : Optional[Any] = floats_tensor((self.batch_size, self.seq_length, 1) )
_a : Tuple = floats_tensor((self.batch_size, self.seq_length, 1) )
_a : Dict = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
_a : Optional[int] = random_attention_mask((self.batch_size, self.seq_length) )
_a : Optional[Any] = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def _lowercase ( self : str ) -> Optional[Any]:
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , ) -> List[Any]:
_a : Optional[int] = DecisionTransformerModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a : List[Any] = model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length * 3 as there are 3 modalities: states, returns and actions
def _lowercase ( self : int ) -> int:
_a : Union[str, Any] = self.prepare_config_and_inputs()
        _a : List[Any] = config_and_inputs
_a : Union[str, Any] = {
"""states""": states,
"""actions""": actions,
"""rewards""": rewards,
"""returns_to_go""": returns_to_go,
"""timesteps""": timesteps,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_torch
class UpperCamelCase ( _a , _a , _a , unittest.TestCase ):
UpperCamelCase : Tuple = (DecisionTransformerModel,) if is_torch_available() else ()
UpperCamelCase : Optional[int] = ()
UpperCamelCase : List[Any] = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
    # Ignore a failing test from GenerationTesterMixin, as the model does not use input_ids
    UpperCamelCase : Any = False
    # Ignore failing tests from ModelTesterMixin, as the model does not implement these features
UpperCamelCase : Optional[int] = False
UpperCamelCase : List[str] = False
UpperCamelCase : List[str] = False
UpperCamelCase : Optional[int] = False
UpperCamelCase : Optional[int] = False
UpperCamelCase : Tuple = False
UpperCamelCase : List[Any] = False
UpperCamelCase : Tuple = False
UpperCamelCase : Any = False
def _lowercase ( self : Optional[Any] ) -> List[str]:
_a : Union[str, Any] = DecisionTransformerModelTester(self )
_a : Tuple = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def _lowercase ( self : List[str] ) -> int:
self.config_tester.run_common_tests()
def _lowercase ( self : List[str] ) -> int:
_a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
@slow
def _lowercase ( self : str ) -> Optional[int]:
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Tuple = DecisionTransformerModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def _lowercase ( self : Optional[int] ) -> Dict:
_a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = model_class(snake_case_ )
_a : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Optional[int] = [*signature.parameters.keys()]
_a : Union[str, Any] = [
"""states""",
"""actions""",
"""rewards""",
"""returns_to_go""",
"""timesteps""",
"""attention_mask""",
]
self.assertListEqual(arg_names[: len(snake_case_ )] , snake_case_ )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def _lowercase ( self : Union[str, Any] ) -> List[str]:
_a : Tuple = 2 # number of steps of autoregressive prediction we will perform
_a : int = 10 # defined by the RL environment, may be normalized
_a : int = DecisionTransformerModel.from_pretrained("""edbeeching/decision-transformer-gym-hopper-expert""" )
_a : Union[str, Any] = model.to(snake_case_ )
_a : Dict = model.config
torch.manual_seed(0 )
_a : Dict = torch.randn(1 , 1 , config.state_dim ).to(device=snake_case_ , dtype=torch.floataa ) # env.reset()
_a : List[str] = torch.tensor(
[[0.2_4_2_7_9_3, -0.2_8_6_9_3_0_7_4, 0.8_7_4_2_6_1_3], [0.6_7_8_1_5_2_7_4, -0.0_8_1_0_1_0_8_5, -0.1_2_9_5_2_1_4_7]] , device=snake_case_ )
_a : Optional[Any] = torch.tensor(snake_case_ , device=snake_case_ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
_a : Union[str, Any] = state
_a : List[str] = torch.zeros(1 , 0 , config.act_dim , device=snake_case_ , dtype=torch.floataa )
_a : List[Any] = torch.zeros(1 , 0 , device=snake_case_ , dtype=torch.floataa )
_a : List[str] = torch.tensor(0 , device=snake_case_ , dtype=torch.long ).reshape(1 , 1 )
for step in range(snake_case_ ):
_a : List[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=snake_case_ )] , dim=1 )
_a : Optional[Any] = torch.cat([rewards, torch.zeros(1 , 1 , device=snake_case_ )] , dim=1 )
_a : Any = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
_a : str = model(
states=snake_case_ , actions=snake_case_ , rewards=snake_case_ , returns_to_go=snake_case_ , timesteps=snake_case_ , attention_mask=snake_case_ , return_dict=snake_case_ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
_a : List[str] = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=snake_case_ , dtype=torch.floataa ),
1.0,
False,
{},
)
_a : List[Any] = action_pred[0, -1]
_a : int = torch.cat([states, state] , dim=1 )
_a : str = returns_to_go[0, -1] - reward
_a : Optional[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
_a : List[Any] = torch.cat(
[timesteps, torch.ones((1, 1) , device=snake_case_ , dtype=torch.long ) * (step + 1)] , dim=1 )
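        # Each iteration above appends an empty action/reward slot, queries the
        # model for the next action, feeds back a (here randomly simulated)
        # environment transition, and decrements the return-to-go by the reward
        # received; this is the standard autoregressive rollout for a Decision
        # Transformer policy.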
| 371
|
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 324
| 0
|
"""simple docstring"""
_snake_case = {
'joule': 1.0,
'kilojoule': 1000,
'megajoule': 100_0000,
'gigajoule': 10_0000_0000,
'wattsecond': 1.0,
'watthour': 3600,
'kilowatthour': 360_0000,
'newtonmeter': 1.0,
'calorie_nutr': 41_86.8,
'kilocalorie_nutr': 418_6800.00,
'electronvolt': 1.602_176_634e-19,
'britishthermalunit_it': 10_55.0_55_85,
'footpound': 1.35_58_18,
}
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
_a : List[Any] = (
F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
F"""Valid values are: {', '.join(__lowerCAmelCase )}"""
)
raise ValueError(__lowerCAmelCase )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
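# Illustrative conversions (the public function name is assumed to be
# energy_conversion, matching the ENERGY_CONVERSION table above):
#
#   energy_conversion("joule", "kilojoule", 1000)   ->  1.0
#   energy_conversion("kilowatthour", "joule", 1)   ->  3600000.0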
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350
|
"""simple docstring"""
_snake_case = 8.31_44_62 # Unit - J mol-1 K-1
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
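# Worked example of the ideal gas law PV = nRT (the function name below is
# assumed; the first function above returns pressure, the second volume):
#
#   pressure_of_gas_system(2, 300, 0.1)   # 2 mol, 300 K, 0.1 m^3
#   -> 2 * 300 * 8.314462 / 0.1 ~= 49886.77 (Pa)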
if __name__ == "__main__":
from doctest import testmod
testmod()
| 324
| 0
|
"""simple docstring"""
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
return "".join(chr(ord(_UpperCAmelCase ) - 3_2 ) if """a""" <= char <= """z""" else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 351
|
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_snake_case = logging.getLogger(__name__)
_snake_case = 'pytorch_model.bin'
@dataclasses.dataclass
class UpperCamelCase :
UpperCamelCase : str = dataclasses.field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co.'''} , )
@dataclasses.dataclass
class UpperCamelCase :
UpperCamelCase : str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the training data.'''} )
UpperCamelCase : str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the data to predict on.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''The name of the task to train on.'''} , )
UpperCamelCase : Optional[List[str]] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''The list of labels for the task.'''} )
@dataclasses.dataclass
class UpperCamelCase :
UpperCamelCase : str = dataclasses.field(
metadata={'''help''': '''The output directory where the model predictions and checkpoints will be written.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default='''accuracy''' , metadata={'''help''': '''The evaluation metric used for the task.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default='''no''' , metadata={
        '''help''': '''The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'''
} , )
UpperCamelCase : Optional[int] = dataclasses.field(
default=10 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , )
UpperCamelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={
'''help''': '''How much the specified evaluation metric must improve to satisfy early stopping conditions.'''
} , )
UpperCamelCase : Optional[bool] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the confidence score.'''} , )
UpperCamelCase : Optional[bool] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the validation performance.'''} , )
UpperCamelCase : Optional[bool] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Whether to fine-tune on labeled data after pseudo training.'''} , )
UpperCamelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={'''help''': '''Confidence threshold for pseudo-labeled data filtering.'''} , )
UpperCamelCase : Optional[int] = dataclasses.field(
        default=100 , metadata={'''help''': '''Maximum number of self-training iterations.'''} , )
UpperCamelCase : Optional[int] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Random seed for initialization.'''} , )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Optional[int] = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
_a : Union[str, Any] = dataset.filter(lambda UpperCamelCase__ : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
_a : Any = int(eval_result * len(UpperCamelCase__ ) )
print(UpperCamelCase__ )
_a : str = dataset.sort("""probability""" , reverse=UpperCamelCase__ )
_a : Any = dataset.select(range(UpperCamelCase__ ) )
_a : Tuple = dataset.remove_columns(["""label""", """probability"""] )
_a : Optional[Any] = dataset.rename_column("""prediction""" , """label""" )
_a : Dict = dataset.map(lambda UpperCamelCase__ : {"label": idalabel[example["label"]]} )
_a : Union[str, Any] = dataset.shuffle(seed=args.seed )
_a : Optional[int] = os.path.join(UpperCamelCase__ , F"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(UpperCamelCase__ , index=UpperCamelCase__ )
else:
dataset.to_json(UpperCamelCase__ )
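# The function above keeps the most confident (or best-validated) predictions,
# maps integer predictions back to string labels, shuffles, and writes the
# result out as the next iteration's pseudo-labeled training file.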
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
_a : Optional[int] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
_a : Dict = STModelArguments(model_name_or_path=UpperCamelCase__ )
_a : Union[str, Any] = STDataArguments(train_file=UpperCamelCase__ , infer_file=UpperCamelCase__ )
_a : Any = STTrainingArguments(output_dir=UpperCamelCase__ )
_a : Any = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(UpperCamelCase__ ).items():
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for key, value in kwargs.items():
if hasattr(UpperCamelCase__ , UpperCamelCase__ ):
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Sanity checks
_a : Union[str, Any] = {}
_a : Tuple = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
_a : int = args.train_file
_a : List[Any] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
_a : Union[str, Any] = args.eval_file
for key in data_files:
_a : Optional[Any] = data_files[key].split(""".""" )[-1]
assert extension in ["csv", "json"], F"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
_a : str = extension
else:
            assert extension == args.data_file_extension, F"""`{key}_file` should be a {args.data_file_extension} file."""
assert (
args.eval_metric in datasets.list_metrics()
), F"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("""Creating the initial data directory for self-training...""" )
_a : Tuple = F"""{args.output_dir}/self-train_iter-{{}}""".format
_a : Dict = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=UpperCamelCase__ )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
accelerator.wait_for_everyone()
_a : str = None
_a : int = None
_a : str = 0
_a : List[Any] = False
# Show the progress bar
_a : List[Any] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_a : Union[str, Any] = data_dir_format(UpperCamelCase__ )
assert os.path.exists(UpperCamelCase__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_a : str = os.path.join(UpperCamelCase__ , """stage-1""" )
_a : Tuple = {
"""accelerator""": accelerator,
"""model_name_or_path""": args.model_name_or_path,
"""cache_dir""": args.cache_dir,
"""do_train""": True,
"""train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
"""do_eval""": True if args.eval_file is not None else False,
"""eval_file""": data_files["""eval"""],
"""do_predict""": True,
"""infer_file""": data_files["""infer"""],
"""task_name""": args.task_name,
"""label_list""": args.label_list,
"""output_dir""": current_output_dir,
"""eval_metric""": args.eval_metric,
"""evaluation_strategy""": args.evaluation_strategy,
"""early_stopping_patience""": args.early_stopping_patience,
"""early_stopping_threshold""": args.early_stopping_threshold,
"""seed""": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
arguments_dict.update({key: value} )
_a : int = os.path.join(UpperCamelCase__ , """best-checkpoint""" , UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , UpperCamelCase__ , UpperCamelCase__ , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , UpperCamelCase__ )
finetune(**UpperCamelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(UpperCamelCase__ )
logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , UpperCamelCase__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_a : Dict = os.path.join(UpperCamelCase__ , """best-checkpoint""" )
_a : List[str] = os.path.join(UpperCamelCase__ , """stage-2""" )
# Update arguments_dict
_a : int = model_path
_a : Dict = data_files["""train"""]
_a : int = current_output_dir
_a : Any = os.path.join(UpperCamelCase__ , """best-checkpoint""" , UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""" , UpperCamelCase__ , UpperCamelCase__ , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , UpperCamelCase__ )
finetune(**UpperCamelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(UpperCamelCase__ )
logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , UpperCamelCase__ )
_a : List[Any] = iteration
_a : int = data_dir_format(iteration + 1 )
_a : Dict = AutoConfig.from_pretrained(os.path.join(UpperCamelCase__ , """best-checkpoint""" ) )
_a : Union[str, Any] = config.idalabel
_a : Any = os.path.join(UpperCamelCase__ , """eval_results_best-checkpoint.json""" )
_a : Any = os.path.join(UpperCamelCase__ , """test_results_best-checkpoint.json""" )
assert os.path.exists(UpperCamelCase__ )
with open(UpperCamelCase__ , """r""" ) as f:
_a : Tuple = float(json.load(UpperCamelCase__ )[args.eval_metric] )
_a : Dict = os.path.join(UpperCamelCase__ , """infer_output_best-checkpoint.csv""" )
assert os.path.exists(UpperCamelCase__ )
# Loading the dataset from local csv or json files.
_a : List[Any] = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""]
_a : Any = load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""]
if accelerator.is_main_process:
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
shutil.copy(UpperCamelCase__ , os.path.join(UpperCamelCase__ , F"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(UpperCamelCase__ ):
shutil.copy(UpperCamelCase__ , os.path.join(UpperCamelCase__ , F"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
accelerator.wait_for_everyone()
_a : List[str] = os.path.join(UpperCamelCase__ , F"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_a : Any = eval_result
if best_iteration is None:
_a : Union[str, Any] = new_iteration
_a : str = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_a : Union[str, Any] = new_iteration
_a : List[str] = new_eval_result
_a : Optional[Any] = 0
else:
if new_eval_result == best_eval_result:
_a : Tuple = new_iteration
_a : List[Any] = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_a : Union[str, Any] = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("""Best iteration: %d""" , UpperCamelCase__ )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , UpperCamelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(UpperCamelCase__ , F"""eval_results_iter-{iteration}.json""" ) , os.path.join(UpperCamelCase__ , """eval_results_best-iteration.json""" ) , )
else:
# Assume that the last iteration is the best
logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , UpperCamelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(UpperCamelCase__ , F"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(UpperCamelCase__ , """eval_results_best-iteration.json""" ) , )
| 324
| 0
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
_a : str = tempfile.mkdtemp()
# fmt: off
_a : int = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_a : Any = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
_a : Optional[int] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
_a : Optional[int] = {"""unk_token""": """<unk>"""}
_a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
_a : Optional[Any] = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"""image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
_a : Optional[Any] = os.path.join(self.tmpdirname , __SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self : Dict , **UpperCAmelCase__ : str ) -> Any:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self : Tuple , **UpperCAmelCase__ : Union[str, Any] ) -> List[str]:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self : int , **UpperCAmelCase__ : str ) -> Any:
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self : int ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : Optional[Any] ) -> str:
_a : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_a : List[str] = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowercase ( self : str ) -> Any:
_a : Dict = self.get_tokenizer()
_a : Optional[Any] = self.get_rust_tokenizer()
_a : Union[str, Any] = self.get_image_processor()
_a : Optional[int] = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
_a : str = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE )
_a : List[str] = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
_a : List[str] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , __SCREAMING_SNAKE_CASE )
def _lowercase ( self : Union[str, Any] ) -> List[Any]:
_a : Dict = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_a : int = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
_a : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def _lowercase ( self : Any ) -> Dict:
_a : str = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : Any = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
_a : Optional[int] = self.prepare_image_inputs()
_a : Optional[int] = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="""np""" )
_a : Union[str, Any] = processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowercase ( self : List[str] ) -> Optional[int]:
_a : List[Any] = self.get_image_processor()
_a : Tuple = self.get_tokenizer()
_a : List[str] = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
_a : Union[str, Any] = """lower newer"""
_a : Optional[Any] = processor(text=__SCREAMING_SNAKE_CASE )
_a : List[str] = tokenizer(__SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowercase ( self : str ) -> int:
_a : int = self.get_image_processor()
_a : Any = self.get_tokenizer()
_a : Any = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
_a : List[str] = """lower newer"""
_a : Optional[int] = self.prepare_image_inputs()
_a : int = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE ):
processor()
def _lowercase ( self : str ) -> Optional[Any]:
_a : Optional[Any] = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : Optional[Any] = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
_a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a : int = processor.batch_decode(__SCREAMING_SNAKE_CASE )
_a : int = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( self : Tuple ) -> int:
_a : Optional[Any] = self.get_image_processor()
_a : Dict = self.get_tokenizer()
_a : Tuple = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
_a : str = """lower newer"""
_a : List[str] = self.prepare_image_inputs()
_a : Union[str, Any] = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 352
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
_snake_case = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
_snake_case = {
'camembert-base': 512,
}
_snake_case = '▁'
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : Any = VOCAB_FILES_NAMES
UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : Dict = ['''input_ids''', '''attention_mask''']
UpperCamelCase : Optional[Any] = CamembertTokenizer
def __init__( self : int , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Optional[Any]="<s>" , UpperCAmelCase__ : Optional[int]="</s>" , UpperCAmelCase__ : Tuple="</s>" , UpperCAmelCase__ : Tuple="<s>" , UpperCAmelCase__ : Tuple="<unk>" , UpperCAmelCase__ : Tuple="<pad>" , UpperCAmelCase__ : int="<mask>" , UpperCAmelCase__ : Optional[int]=["<s>NOTUSED", "</s>NOTUSED"] , **UpperCAmelCase__ : Optional[Any] , ) -> Union[str, Any]:
        # Mask token behaves like a normal word, i.e. it includes the space before it
_a : List[Any] = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token
super().__init__(
UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , )
_a : int = vocab_file
_a : int = False if not self.vocab_file else True
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : List[Any] = [self.cls_token_id]
_a : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
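        # Resulting layouts (CamemBERT, like RoBERTa, uses a double separator
        # between the two sequences of a pair):
        #   single sequence: <s> A </s>
        #   pair of sequences: <s> A </s></s> B </s>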
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
_a : Union[str, Any] = [self.sep_token_id]
_a : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : List[str] = os.path.join(
UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ):
copyfile(self.vocab_file , UpperCAmelCase__ )
return (out_vocab_file,)
| 324
| 0
|
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
_snake_case = re.compile(r'\b(a|an|the)\b', re.UNICODE)
_snake_case = None
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : Dict = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" , """-t""" , type=lowerCamelCase__ , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , )
parser.add_argument(
"""--out-image-dir""" , """-p""" , metavar="""out_images""" , default=lowerCamelCase__ , help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
_a : Dict = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_a : int = bool(qa["""answers"""]["""text"""] )
return qid_to_has_ans
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
def remove_articles(UpperCamelCase__ ):
return ARTICLES_REGEX.sub(""" """ , lowerCamelCase__ )
def white_space_fix(UpperCamelCase__ ):
return " ".join(text.split() )
def remove_punc(UpperCamelCase__ ):
_a : str = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(UpperCamelCase__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase__ ) ) ) )
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
if not s:
return []
return normalize_answer(lowerCamelCase__ ).split()
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
return int(normalize_answer(lowerCamelCase__ ) == normalize_answer(lowerCamelCase__ ) )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Tuple = get_tokens(lowerCamelCase__ )
_a : List[str] = get_tokens(lowerCamelCase__ )
_a : Dict = collections.Counter(lowerCamelCase__ ) & collections.Counter(lowerCamelCase__ )
_a : Optional[Any] = sum(common.values() )
if len(lowerCamelCase__ ) == 0 or len(lowerCamelCase__ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
_a : Tuple = 1.0 * num_same / len(lowerCamelCase__ )
_a : Any = 1.0 * num_same / len(lowerCamelCase__ )
_a : Tuple = (2 * precision * recall) / (precision + recall)
return fa
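# Worked example for the token-level F1 above: gold "cat sat here" and
# prediction "cat sat down" share two tokens, giving precision = recall = 2/3
# and F1 = 2 * (2/3) * (2/3) / (2/3 + 2/3) = 2/3.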
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : List[str] = {}
_a : List[str] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
_a : Union[str, Any] = qa["""id"""]
_a : List[Any] = [t for t in qa["""answers"""]["""text"""] if normalize_answer(lowerCamelCase__ )]
if not gold_answers:
                # For unanswerable questions, the only correct answer is the empty string
_a : Dict = [""""""]
if qid not in preds:
print(F"""Missing prediction for {qid}""" )
continue
_a : Optional[int] = preds[qid]
# Take max over all gold answers
_a : Dict = max(compute_exact(lowerCamelCase__ , lowerCamelCase__ ) for a in gold_answers )
_a : int = max(compute_fa(lowerCamelCase__ , lowerCamelCase__ ) for a in gold_answers )
return exact_scores, fa_scores
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Optional[int] = {}
for qid, s in scores.items():
_a : Optional[int] = na_probs[qid] > na_prob_thresh
if pred_na:
_a : Tuple = float(not qid_to_has_ans[qid] )
else:
_a : str = s
return new_scores
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ):
'''simple docstring'''
if not qid_list:
_a : Dict = len(lowerCamelCase__ )
return collections.OrderedDict(
[
("""exact""", 1_0_0.0 * sum(exact_scores.values() ) / total),
("""f1""", 1_0_0.0 * sum(fa_scores.values() ) / total),
("""total""", total),
] )
else:
_a : Optional[Any] = len(lowerCamelCase__ )
return collections.OrderedDict(
[
("""exact""", 1_0_0.0 * sum(exact_scores[k] for k in qid_list ) / total),
("""f1""", 1_0_0.0 * sum(fa_scores[k] for k in qid_list ) / total),
("""total""", total),
] )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
for k in new_eval:
_a : List[str] = new_eval[k]
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
plt.step(lowerCamelCase__ , lowerCamelCase__ , color="""b""" , alpha=0.2 , where="""post""" )
plt.fill_between(lowerCamelCase__ , lowerCamelCase__ , step="""post""" , alpha=0.2 , color="""b""" )
plt.xlabel("""Recall""" )
plt.ylabel("""Precision""" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(lowerCamelCase__ )
plt.savefig(lowerCamelCase__ )
plt.clf()
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None ):
'''simple docstring'''
_a : str = sorted(lowerCamelCase__ , key=lambda UpperCamelCase__ : na_probs[k] )
_a : Any = 0.0
_a : Dict = 1.0
_a : Union[str, Any] = 0.0
_a : Dict = [1.0]
_a : str = [0.0]
_a : Dict = 0.0
for i, qid in enumerate(lowerCamelCase__ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
_a : List[Any] = true_pos / float(i + 1 )
_a : str = true_pos / float(lowerCamelCase__ )
if i == len(lowerCamelCase__ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(lowerCamelCase__ )
recalls.append(lowerCamelCase__ )
if out_image:
plot_pr_curve(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return {"ap": 1_0_0.0 * avg_prec}
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if out_image_dir and not os.path.exists(lowerCamelCase__ ):
os.makedirs(lowerCamelCase__ )
_a : int = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
_a : Tuple = make_precision_recall_eval(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , out_image=os.path.join(lowerCamelCase__ , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , )
_a : Dict = make_precision_recall_eval(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , out_image=os.path.join(lowerCamelCase__ , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , )
_a : Tuple = {k: float(lowerCamelCase__ ) for k, v in qid_to_has_ans.items()}
_a : List[str] = make_precision_recall_eval(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , out_image=os.path.join(lowerCamelCase__ , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , )
merge_eval(lowerCamelCase__ , lowerCamelCase__ , """pr_exact""" )
merge_eval(lowerCamelCase__ , lowerCamelCase__ , """pr_f1""" )
merge_eval(lowerCamelCase__ , lowerCamelCase__ , """pr_oracle""" )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if not qid_list:
return
_a : Dict = [na_probs[k] for k in qid_list]
_a : Tuple = np.ones_like(lowerCamelCase__ ) / float(len(lowerCamelCase__ ) )
plt.hist(lowerCamelCase__ , weights=lowerCamelCase__ , bins=2_0 , range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(F"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(lowerCamelCase__ , F"""na_prob_hist_{name}.png""" ) )
plt.clf()
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Any = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
_a : Any = num_no_ans
_a : Dict = cur_score
_a : Optional[int] = 0.0
_a : int = sorted(lowerCamelCase__ , key=lambda UpperCamelCase__ : na_probs[k] )
for i, qid in enumerate(lowerCamelCase__ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
_a : Tuple = scores[qid]
else:
if preds[qid]:
_a : List[Any] = -1
else:
_a : List[Any] = 0
cur_score += diff
if cur_score > best_score:
_a : Any = cur_score
_a : Tuple = na_probs[qid]
return 1_0_0.0 * best_score / len(lowerCamelCase__ ), best_thresh
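# The sweep above considers every candidate no-answer threshold in order of
# increasing no-answer probability: treating one more question as answered
# changes the running score by +score[qid] if it is answerable and by -1 (or
# 0, if the prediction was already empty) if it is not, and the best running
# total determines the returned threshold.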
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a , _a : Any = find_best_thresh(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_a , _a : List[Any] = find_best_thresh(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_a : Any = best_exact
_a : Tuple = exact_thresh
_a : Tuple = best_fa
_a : Optional[int] = fa_thresh
def main():
    """Run the SQuAD-style evaluation end to end."""
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
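# Added illustration (not part of the original script): a tiny, made-up
# dataset showing how `find_best_thresh` sweeps the no-answer threshold.
# "q1"/"q2" are answerable, "q3" is not; all numbers are invented.
def _find_best_thresh_demo():
    preds = {"q1": "Paris", "q2": "42", "q3": ""}
    exact_scores = {"q1": 1.0, "q2": 0.0}  # per-question exact-match scores
    na_probs = {"q1": 0.1, "q2": 0.8, "q3": 0.9}
    qid_to_has_ans = {"q1": True, "q2": True, "q3": False}
    best, thresh = find_best_thresh(preds, exact_scores, na_probs, qid_to_has_ans)
    print(f"best score: {best:.1f} at threshold {thresh}")  # 100.0 at 0.1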
| 353
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size_divisor: int = 32, resample=PILImageResampling.BILINEAR, do_rescale: bool = True, **kwargs) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image: np.ndarray, size_divisor: int, resample, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
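# Added usage sketch (the class name above is reconstructed; the call relies on
# `BaseImageProcessor.__call__` dispatching to `preprocess`). The shapes only
# illustrate the rounding-down behaviour of `resize` with size_divisor=32.
def _image_processor_demo():
    processor = GLPNImageProcessor(size_divisor=32)
    image = np.random.randint(0, 256, (70, 100, 3), dtype=np.uint8)
    batch = processor(image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 64, 96): 70 -> 64, 100 -> 96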
| 324
| 0
|
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of an ASCII string."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
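# Added sanity check (illustrative): for ASCII input the result matches zlib's
# built-in Adler-32, since ord(char) equals the encoded byte value.
def _adler32_demo():
    import zlib

    text = "Wikipedia"
    assert adler32(text) == zlib.adler32(text.encode("ascii")) == 300286872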
| 354
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
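# Added usage sketch (checkpoint id taken from the integration test above):
# how the same pipeline is driven outside of a test harness.
def _karras_ve_sample():
    unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
    pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
    pipe.to(torch_device)
    generator = torch.manual_seed(0)
    return pipe(num_inference_steps=20, generator=generator, output_type="numpy").images[0]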
| 324
| 0
|
"""simple docstring"""
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
_snake_case = logging.getLogger()
_snake_case = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir}
            --output_dir {output_dir}
            --model_name_or_path facebook/rag-sequence-base
            --model_type rag_sequence
            --do_train
            --do_predict
            --n_val -1
            --val_check_interval 1.0
            --train_batch_size 2
            --eval_batch_size 1
            --max_source_length 25
            --max_target_length 25
            --val_max_target_length 25
            --test_max_target_length 25
            --label_smoothing 0.1
            --dropout 0.1
            --attention_dropout 0.1
            --weight_decay 0.001
            --adam_epsilon 1e-08
            --max_grad_norm 0.1
            --lr_scheduler polynomial
            --learning_rate 3e-04
            --num_train_epochs 1
            --warmup_steps 4
            --gradient_accumulation_steps 1
            --distributed-port 8787
            --use_dummy_dataset 1
            --distributed_retriever {distributed_retriever}
        """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 355
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build train/eval dataloaders for GLUE MRPC."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
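# Added note (illustrative): the script is normally started through the Accelerate
# launcher so the same code runs on CPU, single/multi GPU, or TPU, e.g.
#   accelerate launch multi_process_metrics.py --mixed_precision fp16
# (the file name is an assumption; use whatever this script is saved as).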
| 324
| 0
|
"""simple docstring"""
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str):
    """Check that rotors, rotor positions and plugboard settings are usable."""
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)
    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)
    # Validates string and returns dict
    pbdict = _plugboard(pb)
    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict:
    """Build the symmetric plugboard mapping from a string of letter pairs."""
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")  # str.replace returns a new string, so reassign
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encipher (or, symmetrically, decipher) text with the given settings."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(rotor_position, rotor_selection, plugb.upper())

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]
            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]
            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]
            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
            rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
            rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0
        # else:
        #    pass
        #    Error could be also raised
        #    raise ValueError('Invalid symbol(' + repr(symbol) + ')')
        result.append(symbol)
    return "".join(result)
if __name__ == "__main__":
_snake_case = 'This is my Python script that emulates the Enigma machine from WWII.'
_snake_case = (1, 1, 1)
_snake_case = 'pictures'
_snake_case = (rotora, rotora, rotora)
_snake_case = enigma(message, rotor_pos, rotor_sel, pb)
print('Encrypted message:', en)
print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
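# Added property sketch (illustrative): with identical settings the cipher is its
# own inverse, because the reflector makes every per-letter substitution symmetric.
def _enigma_round_trip_demo():
    pos, sel, plug = (1, 1, 1), (rotor2, rotor4, rotor8), "AB"
    cipher = enigma("HELLOWORLD", pos, sel, plug)
    assert enigma(cipher, pos, sel, plug) == "HELLOWORLD"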
| 356
|
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Apply the logistic sigmoid element-wise."""
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
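# Added illustration: the approximation satisfies gelu(x) - gelu(-x) == x,
# because sigmoid(t) + sigmoid(-t) == 1 for every t.
def _gelu_identity_demo():
    xs = np.linspace(-3.0, 3.0, 7)
    assert np.allclose(gaussian_error_linear_unit(xs) - gaussian_error_linear_unit(-xs), xs)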
| 324
| 0
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
def __init__( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Dict=10 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Union[str, Any]=32 * 4 , UpperCAmelCase__ : Optional[Any]=32 * 6 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : Optional[int]=32 , ) -> Optional[Any]:
_a : Optional[int] = parent
_a : Optional[Any] = batch_size
_a : Any = is_training
_a : Tuple = use_auxiliary_loss
_a : str = num_queries
_a : Dict = num_channels
_a : List[str] = min_size
_a : Optional[Any] = max_size
_a : str = num_labels
_a : str = mask_feature_size
def _lowercase ( self : int ) -> int:
_a : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
_a : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
_a : int = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
_a : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
_a : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _lowercase ( self : int ) -> List[str]:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def _lowercase ( self : str ) -> List[str]:
_a : Tuple = self.prepare_config_and_inputs()
_a : Optional[Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def _lowercase ( self : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any ) -> int:
_a : Optional[int] = output.encoder_hidden_states
_a : List[str] = output.pixel_decoder_hidden_states
_a : Union[str, Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_config.decoder_layers )
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple=False ) -> Optional[int]:
with torch.no_grad():
_a : List[Any] = MaskFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_a : Optional[Any] = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_a : Optional[int] = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def _lowercase ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] ) -> Any:
_a : int = MaskFormerForInstanceSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(UpperCAmelCase__ : Any ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_a : Optional[Any] = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_a : Union[str, Any] = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
_a : Union[str, Any] = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
UpperCamelCase : str = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
UpperCamelCase : str = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
UpperCamelCase : List[str] = False
UpperCamelCase : List[Any] = False
UpperCamelCase : Any = False
UpperCamelCase : Optional[int] = False
    def setUp(self) -> None:
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _lowercase ( self : List[Any] ) -> List[Any]:
_a : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def _lowercase ( self : List[str] ) -> Tuple:
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def _lowercase ( self : List[Any] ) -> int:
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def _lowercase ( self : Dict ) -> int:
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def _lowercase ( self : Optional[int] ) -> str:
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def _lowercase ( self : Dict ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`""" )
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowercase ( self : List[Any] ) -> Tuple:
pass
def _lowercase ( self : int ) -> Optional[int]:
_a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Dict = model_class(lowerCamelCase__ )
_a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Tuple = [*signature.parameters.keys()]
_a : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def _lowercase ( self : int ) -> int:
for model_name in ["facebook/maskformer-swin-small-coco"]:
_a : Any = MaskFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def _lowercase ( self : List[str] ) -> Optional[int]:
_a : Optional[Any] = (self.model_tester.min_size,) * 2
_a : Dict = {
'''pixel_values''': torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
'''mask_labels''': torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
'''class_labels''': torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
_a : List[Any] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowerCamelCase__ )
_a : List[str] = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def _lowercase ( self : str ) -> List[Any]:
_a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def _lowercase ( self : Any ) -> Optional[Any]:
_a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[Any] = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
_a : Tuple = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def _lowercase ( self : List[str] ) -> Optional[int]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_a : List[str] = self.all_model_classes[1]
_a : Tuple = self.model_tester.prepare_config_and_inputs()
_a : str = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
_a : Optional[int] = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def _lowercase ( self : Optional[Any] ) -> Any:
_a : List[str] = self.all_model_classes[1]
_a : Tuple = self.model_tester.prepare_config_and_inputs()
_a : Optional[Any] = True
_a : List[Any] = True
_a : str = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
_a : str = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
_a : Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_a : Any = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
_a : List[str] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_a : int = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def _lowercase ( self : Any ) -> Tuple:
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def _lowercase ( self : Dict ) -> str:
_a : Union[str, Any] = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(lowerCamelCase__ )
_a : Dict = self.default_image_processor
_a : int = prepare_img()
_a : Any = image_processor(lowerCamelCase__ , return_tensors="""pt""" ).to(lowerCamelCase__ )
_a : int = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 800, 1088) )
with torch.no_grad():
_a : List[Any] = model(**lowerCamelCase__ )
_a : str = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_a : Dict = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_a : Tuple = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def _lowercase ( self : str ) -> Tuple:
_a : Tuple = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(lowerCamelCase__ )
.eval()
)
_a : int = self.default_image_processor
_a : Union[str, Any] = prepare_img()
_a : Optional[int] = image_processor(lowerCamelCase__ , return_tensors="""pt""" ).to(lowerCamelCase__ )
_a : Optional[int] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 800, 1088) )
with torch.no_grad():
_a : List[Any] = model(**lowerCamelCase__ )
# masks_queries_logits
_a : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_a : Union[str, Any] = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
_a : str = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
_a : str = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_a : Optional[int] = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def _lowercase ( self : Union[str, Any] ) -> List[Any]:
_a : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(lowerCamelCase__ )
.eval()
)
_a : Tuple = self.default_image_processor
_a : str = prepare_img()
_a : Optional[Any] = image_processor(lowerCamelCase__ , return_tensors="""pt""" ).to(lowerCamelCase__ )
_a : int = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 800, 1088) )
with torch.no_grad():
_a : Optional[Any] = model(**lowerCamelCase__ )
# masks_queries_logits
_a : str = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_a : int = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
_a : List[Any] = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
_a : Dict = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_a : List[Any] = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def _lowercase ( self : Tuple ) -> List[Any]:
_a : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(lowerCamelCase__ )
.eval()
)
_a : str = self.default_image_processor
_a : Dict = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , )
_a : Optional[Any] = inputs['''pixel_values'''].to(lowerCamelCase__ )
_a : Optional[Any] = [el.to(lowerCamelCase__ ) for el in inputs['''mask_labels''']]
_a : List[str] = [el.to(lowerCamelCase__ ) for el in inputs['''class_labels''']]
with torch.no_grad():
_a : Optional[Any] = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
| 357
|
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir('fixtures/test_sentencepiece.model')
_snake_case = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
_snake_case = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( snake_case_ , unittest.TestCase ):
UpperCamelCase : str = CamembertTokenizer
UpperCamelCase : List[Any] = CamembertTokenizerFast
UpperCamelCase : Optional[int] = True
UpperCamelCase : Union[str, Any] = True
def _lowercase ( self : List[Any] ) -> Union[str, Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_a : List[Any] = CamembertTokenizer(UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self : List[str] ) -> Tuple:
_a : Optional[Any] = """<pad>"""
_a : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ) -> str:
_a : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(UpperCAmelCase__ ) , 1004 )
def _lowercase ( self : List[str] ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def _lowercase ( self : Union[str, Any] ) -> str:
_a : Tuple = CamembertTokenizer(UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
_a : List[Any] = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
_a : Any = """I was born in 92000, and this is falsé."""
_a : Union[str, Any] = tokenizer.encode(UpperCAmelCase__ )
_a : Dict = rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Tuple = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
_a : List[Any] = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
_a : List[str] = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
_a : int = rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
    def test_rust_and_python_full_tokenizers(self) -> None:
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
def _lowercase ( self : Tuple ) -> List[Any]:
# fmt: off
_a : Dict = {"""input_ids""": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
_a : Union[str, Any] = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=UpperCAmelCase__ , )
| 324
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['PerceiverFeatureExtractor']
_snake_case = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
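# Added note (illustrative): with the `_LazyModule` indirection above, a plain
# `from transformers import PerceiverModel` resolves through `_import_structure`
# and only imports the heavy modeling file on first attribute access.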
| 358
|
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_snake_case = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
_snake_case = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_snake_case = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_snake_case = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_snake_case = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_snake_case = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
('image-to-text', 'MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier):
    """Split a camel-cased name into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
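# Added illustration of the split behaviour on a typical model-class name.
def _camel_case_split_demo():
    assert camel_case_split("TFBertModel") == ["TF", "Bert", "Model"]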
def get_frameworks_table():
'''simple docstring'''
_a : Tuple = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_a : Optional[int] = {
config.replace("""Config""" , """""" ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
_a : List[Any] = collections.defaultdict(UpperCamelCase__ )
_a : List[str] = collections.defaultdict(UpperCamelCase__ )
_a : Tuple = collections.defaultdict(UpperCamelCase__ )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(UpperCamelCase__ ):
_a : str = None
if _re_tf_models.match(UpperCamelCase__ ) is not None:
_a : List[Any] = tf_models
_a : int = _re_tf_models.match(UpperCamelCase__ ).groups()[0]
elif _re_flax_models.match(UpperCamelCase__ ) is not None:
_a : Any = flax_models
_a : Any = _re_flax_models.match(UpperCamelCase__ ).groups()[0]
elif _re_pt_models.match(UpperCamelCase__ ) is not None:
_a : int = pt_models
_a : int = _re_pt_models.match(UpperCamelCase__ ).groups()[0]
if lookup_dict is not None:
while len(UpperCamelCase__ ) > 0:
if attr_name in model_prefix_to_model_type:
_a : Optional[int] = True
break
# Try again after removing the last word in the name
_a : List[Any] = """""".join(camel_case_split(UpperCamelCase__ )[:-1] )
_a : Optional[int] = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
_a : Dict = list(UpperCamelCase__ )
all_models.sort()
_a : str = {"""model_type""": all_models}
_a : List[Any] = [pt_models[t] for t in all_models]
_a : str = [tf_models[t] for t in all_models]
_a : Optional[int] = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
_a : str = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
_a : List[str] = """AutoProcessor"""
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
_a : str = """AutoTokenizer"""
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
_a : int = """AutoFeatureExtractor"""
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
_a : int = """AutoTokenizer"""
_a : Any = [processors[t] for t in all_models]
return pd.DataFrame(UpperCamelCase__ )
def update_pipeline_and_auto_class_table(table):
'''simple docstring'''
_a : List[Any] = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
_a : List[Any] = [model_mapping, F"""TF_{model_mapping}""", F"""FLAX_{model_mapping}"""]
_a : Union[str, Any] = [auto_class, F"""TF_{auto_class}""", F"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
# The type of pipeline may not exist in this framework
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
continue
# First extract all model_names
_a : str = []
for name in getattr(UpperCamelCase__ , UpperCamelCase__ ).values():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
model_names.append(UpperCamelCase__ )
else:
model_names.extend(list(UpperCamelCase__ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def update_metadata(token, commit_sha):
'''simple docstring'''
_a : Dict = get_frameworks_table()
_a : Optional[Any] = Dataset.from_pandas(UpperCamelCase__ )
_a : Any = hf_hub_download(
"""huggingface/transformers-metadata""" , """pipeline_tags.json""" , repo_type="""dataset""" , token=UpperCamelCase__ )
_a : List[Any] = Dataset.from_json(UpperCamelCase__ )
_a : List[str] = {
tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""])
for i in range(len(UpperCamelCase__ ) )
}
_a : str = update_pipeline_and_auto_class_table(UpperCamelCase__ )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
_a : int = sorted(table.keys() )
_a : Union[str, Any] = pd.DataFrame(
{
"""model_class""": model_classes,
"""pipeline_tag""": [table[m][0] for m in model_classes],
"""auto_class""": [table[m][1] for m in model_classes],
} )
_a : Dict = Dataset.from_pandas(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(UpperCamelCase__ , """frameworks.json""" ) )
tags_dataset.to_json(os.path.join(UpperCamelCase__ , """pipeline_tags.json""" ) )
if commit_sha is not None:
_a : List[str] = (
F"""Update with commit {commit_sha}\n\nSee: """
F"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
_a : Optional[Any] = """Update"""
upload_folder(
repo_id="""huggingface/transformers-metadata""" , folder_path=UpperCamelCase__ , repo_type="""dataset""" , token=UpperCamelCase__ , commit_message=UpperCamelCase__ , )
def check_pipeline_tags():
    '''simple docstring'''
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["""pt"""]
            if isinstance(model , (list, tuple) ):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key )
    if len(missing ) > 0:
        msg = """, """.join(missing )
        raise ValueError(
            """The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """
            F"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'''add_prefix_space''': True}
    test_seq2seq = False
    def setUp( self ) -> None:
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
            '<|endoftext|>',
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
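    # Illustrative: with the toy vocab/merges above, " lower" byte-pair encodes
    # (with the \u0120 space marker) into ["\u0120low", "er"], which is what
    # test_full_tokenizer below checks.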
    def get_tokenizer( self , **kwargs ) -> GPT2Tokenizer:
        kwargs.update(self.special_tokens_map )
        return GPT2Tokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ) -> GPT2TokenizerFast:
        kwargs.update(self.special_tokens_map )
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ) -> List[str]:
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer( self ) -> None:
        tokenizer = GPT2Tokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = 'lower newer'
        bpe_tokens = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text , add_prefix_space=True )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_rust_and_python_full_tokenizers( self ) -> None:
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        sequence = 'lower newer'
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence , add_prefix_space=True )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        ids = tokenizer.encode(sequence , add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_pretokenized_inputs( self , *args , **kwargs ) -> None:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
    def test_padding( self , max_length=15 ) -> None:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = 'This is a simple input'
                sa = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                pa = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="""max_length""" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="""max_length""" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , sa , max_length=max_length , padding="""max_length""" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="""max_length""" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="""max_length""" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , pa , max_length=max_length , padding="""max_length""" , )
    def test_padding_if_pad_token_set_slow( self ) -> None:
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
        # Simple input
        s = 'This is a simple input'
        sa = ['This is a simple input looooooooong', 'This is a simple input']
        p = ('This is a simple input', 'This is a pair')
        pa = [
            ('This is a simple input loooooong', 'This is a simple input'),
            ('This is a simple pair loooooong', 'This is a simple pair'),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
        out_sa = tokenizer(sa , padding=True , truncate=True , return_tensors="""np""" )
        out_p = tokenizer(*p , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
        out_pa = tokenizer(pa , padding=True , truncate=True , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
    def test_add_bos_token_slow( self ) -> None:
        bos_token = '$$$'
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname , bos_token=bos_token , add_bos_token=True )
        s = 'This is a simple input'
        sa = ['This is a simple input 1', 'This is a simple input 2']
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s )
        out_sa = tokenizer(sa )
        self.assertEqual(out_s.input_ids[0] , bos_token_id )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        decode_s = tokenizer.decode(out_s.input_ids )
        decode_sa = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , bos_token )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def _lowercase ( self : List[str] ) -> List[Any]:
pass
    def test_special_tokens_mask_input_pairs_and_bos_token( self ) -> None:
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False , add_bos_token=True )]
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                sequence_0 = 'Encode this.'
                sequence_1 = 'This one too please.'
                encoded_sequence = tokenizer.encode(sequence_0 , add_special_tokens=False )
                encoded_sequence += tokenizer.encode(sequence_1 , add_special_tokens=False )
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0 , sequence_1 , add_special_tokens=True , return_special_tokens_mask=True , )
                encoded_sequence_w_special = encoded_sequence_dict['input_ids']
                special_tokens_mask = encoded_sequence_dict['special_tokens_mask']
                self.assertEqual(len(special_tokens_mask ) , len(encoded_sequence_w_special ) )
                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special )
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence , filtered_sequence )
@require_tokenizers
class UpperCamelCase ( unittest.TestCase ):
    def test_serialize_deserialize_fast_opt( self ) -> None:
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=True )
        text = 'A photo of a cat'
        tokens_ids = tokenizer.encode(
            text , )
        self.assertEqual(tokens_ids , [2, 250, 1345, 9, 10, 4758] )
        tokenizer.save_pretrained("""test_opt""" )
        tokenizer = AutoTokenizer.from_pretrained("""./test_opt""" )
        tokens_ids = tokenizer.encode(
            text , )
        self.assertEqual(tokens_ids , [2, 250, 1345, 9, 10, 4758] )
    def test_fast_slow_equivalence( self ) -> None:
        tokenizer = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , use_slow=True )
        text = 'A photo of a cat'
        tokens_ids = tokenizer.encode(
            text , )
        # Same as above
        self.assertEqual(tokens_ids , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip("""This test is failing because of a bug in the fast tokenizer""" )
    def test_users_can_load_their_tokenizer( self ) -> None:
        tokenizer = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=True )
        tokenizer.bos_token = 'bos'
        tokenizer.bos_token_id = tokenizer.get_vocab()['bos']
        text = 'A photo of a cat'
        tokens_ids = tokenizer.encode(
            text , )
        # We changed the bos token
        self.assertEqual(tokens_ids , [31957, 250, 1345, 9, 10, 4758] )
        tokenizer.save_pretrained("""./tok""" )
        tokenizer = AutoTokenizer.from_pretrained("""./tok""" )
        self.assertTrue(tokenizer.is_fast )
        tokens_ids = tokenizer.encode(
            text , )
        self.assertEqual(tokens_ids , [31957, 250, 1345, 9, 10, 4758] )
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def test_from_dir( files , tmp_path_factory ):
    '''simple docstring'''
    dataset_infos_dir = tmp_path_factory.mktemp("""dset_infos_dir""" )
    if "full:README.md" in files:
        with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
            f.write("""---\ndataset_info:\n  dataset_size: 42\n---""" )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
            f.write("""""" )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f:
            f.write("""{\"default\": {\"dataset_size\": 42}}""" )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , ),
] , )
def test_dataset_info_dump_and_reload( dataset_info , tmp_path ):
    '''simple docstring'''
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path , """dataset_info.json""" ) )
def test_dataset_info_to_yaml_dict():
    '''simple docstring'''
    dataset_info = DatasetInfo(
        description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
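# For reference, the YAML dict serialized above looks roughly like this
# (abridged, illustrative):
#
#     builder_name: builder
#     config_name: config
#     dataset_size: 1234
#     download_size: 1337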
def test_dataset_info_to_yaml_dict_empty():
    '''simple docstring'''
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=4_2 ),
"""v2""": DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload( dataset_infos_dict , tmp_path ):
    '''simple docstring'''
    tmp_path = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path )
    reloaded = DatasetInfosDict.from_directory(tmp_path )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path , """README.md""" ) )
"""simple docstring"""
ENERGY_CONVERSION = {
'joule': 1.0,
'kilojoule': 1000,
'megajoule': 100_0000,
'gigajoule': 10_0000_0000,
'wattsecond': 1.0,
'watthour': 3600,
'kilowatthour': 360_0000,
'newtonmeter': 1.0,
'calorie_nutr': 4186.8,
'kilocalorie_nutr': 418_6800.00,
'electronvolt': 1.602_176_634e-19,
'britishthermalunit_it': 1055.0_5585,
'footpound': 1.35_58_18,
}
def energy_conversion( from_type , to_type , value ):
    '''simple docstring'''
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            F"""Valid values are: {', '.join(ENERGY_CONVERSION )}"""
        )
        raise ValueError(msg )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
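# Worked examples (factors come straight from the ENERGY_CONVERSION table above):
#
#     energy_conversion("joule", "kilojoule", 1)     # 1 * 1.0 / 1000 -> 0.001
#     energy_conversion("kilowatthour", "joule", 1)  # 1 * 3_600_000 / 1.0 -> 3_600_000.0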
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class UpperCamelCase ( unittest.TestCase , ToolTesterMixin ):
    def setUp( self ) -> None:
        self.tool = load_tool("""text-to-speech""" )
        self.tool.setup()
    def test_exact_match_arg( self ) -> None:
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        result = self.tool("""hey""" )
        resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
    def test_exact_match_kwarg( self ) -> None:
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        result = self.tool("""hey""" )
        resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
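# Consumer-side pattern these tests exercise (illustrative sketch):
#
#     tool = load_tool("text-to-speech")
#     audio = tool("hey")          # agent-style call
#     waveform = audio.to_raw()    # plain torch tensor with the synthesized audio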
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ) -> None:
            pass
@is_pipeline_test
@require_vision
@require_torch
class UpperCamelCase ( unittest.TestCase ):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ) -> Tuple:
        object_detector = pipeline(
            """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
        examples = [
            {
                """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
                """candidate_labels""": ["""cat""", """remote""", """couch"""],
            }
        ]
        return object_detector, examples
    def run_pipeline_test( self , object_detector , examples ) -> None:
        outputs = object_detector(examples[0] , threshold=0.0 )
        n = len(outputs )
        self.assertGreater(n , 0 )
        self.assertEqual(
            outputs , [
                {
                    """score""": ANY(float ),
                    """label""": ANY(str ),
                    """box""": {"""xmin""": ANY(int ), """ymin""": ANY(int ), """xmax""": ANY(int ), """ymax""": ANY(int )},
                }
                for i in range(n )
            ] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
    def test_small_model_tf( self ) -> None:
pass
@require_torch
    def test_small_model_pt( self ) -> None:
        object_detector = pipeline(
            """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
        outputs = object_detector(
            """./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.6_4 , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"""score""": 0.7_2_3_5, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7_2_1_8, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7_1_8_4, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6_7_4_8, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_6_5_6, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_6_1_4, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_4_5_6, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.6_4_2, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6_4_1_9, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
] , )
        outputs = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.6_4 , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{"""score""": 0.7_2_3_5, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7_2_1_8, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7_1_8_4, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6_7_4_8, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_6_5_6, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_6_1_4, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6_4_5_6, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.6_4_2, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6_4_1_9, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
]
] , )
@require_torch
@slow
    def test_large_model_pt( self ) -> None:
        object_detector = pipeline("""zero-shot-object-detection""" )
        outputs = object_detector(
            """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2_5_3_7, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1_4_7_4, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1_2_0_8, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
] , )
        outputs = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2_5_3_7, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1_4_7_4, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1_2_0_8, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
[
{"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2_5_3_7, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1_4_7_4, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1_2_0_8, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
    def test_large_model_tf( self ) -> None:
pass
@require_torch
@slow
    def test_threshold( self ) -> None:
        threshold = 0.2
        object_detector = pipeline("""zero-shot-object-detection""" )
        outputs = object_detector(
            """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=threshold , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2_5_3_7, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
] , )
@require_torch
@slow
    def test_top_k( self ) -> None:
        top_k = 2
        object_detector = pipeline("""zero-shot-object-detection""" )
        outputs = object_detector(
            """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=top_k , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
] , )
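# Consumer-side pattern these tests exercise (illustrative sketch):
#
#     detector = pipeline("zero-shot-object-detection")
#     detector(image, candidate_labels=["cat", "remote"], threshold=0.2, top_k=2)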
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class UpperCamelCase :
    def __init__( self , parent , config_class=None , has_text_modality=True , common_properties=None , **kwargs ) -> None:
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties( self ) -> None:
        config = self.config_class(**self.inputs_dict )
        common_properties = (
            ["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["""vocab_size"""] )
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config , prop ) , msg=f"""`{prop}` does not exist""" )
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties ):
            try:
                setattr(config , name , idx )
                self.parent.assertEqual(
                    getattr(config , name ) , idx , msg=f"""`{name} value {idx} expected, but was {getattr(config , name )}""" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties ):
            try:
                config = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(config , name ) , idx , msg=f"""`{name} value {idx} expected, but was {getattr(config , name )}""" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string( self ) -> None:
        config = self.config_class(**self.inputs_dict )
        obj = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , value )
    def create_and_test_config_to_json_file( self ) -> None:
        config_first = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , """config.json""" )
            config_first.to_json_file(json_file_path )
            config_second = self.config_class.from_json_file(json_file_path )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def create_and_test_config_from_and_save_pretrained( self ) -> None:
        config_first = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname )
            config_second = self.config_class.from_pretrained(tmpdirname )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def create_and_test_config_from_and_save_pretrained_subfolder( self ) -> None:
        config_first = self.config_class(**self.inputs_dict )
        subfolder = """test"""
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_directory = os.path.join(tmpdirname , subfolder )
            config_first.save_pretrained(sub_directory )
            config_second = self.config_class.from_pretrained(tmpdirname , subfolder=subfolder )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def create_and_test_config_with_num_labels( self ) -> None:
        config = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.id2label ) , 5 )
        self.parent.assertEqual(len(config.label2id ) , 5 )
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label ) , 3 )
        self.parent.assertEqual(len(config.label2id ) , 3 )
    def check_config_can_be_init_without_params( self ) -> None:
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config )
    def check_config_arguments_init( self ) -> None:
        kwargs = copy.deepcopy(config_common_kwargs )
        config = self.config_class(**kwargs )
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch
                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.float16) )
            elif getattr(config , key ) != value:
                wrong_values.append((key, getattr(config , key ), value) )
        if len(wrong_values ) > 0:
            errors = """\n""".join([f"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
            raise ValueError(f"""The following keys were not properly set in the config:\n{errors}""" )
    def run_common_tests( self ) -> None:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
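# Typical usage from a model test case (illustrative; `BertConfig` stands in for
# any config class and `self` for the enclosing unittest.TestCase):
#
#     config_tester = UpperCamelCase(self, config_class=BertConfig, hidden_size=37)
#     config_tester.run_common_tests()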
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCamelCase ( unittest.TestCase ):
    def test_text_streamer_matches_non_streaming( self ) -> None:
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        model = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer )
            model.generate(input_ids , max_new_tokens=10 , do_sample=False , streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text , greedy_text )
    def test_iterator_streamer_matches_non_streaming( self ) -> None:
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        model = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        streamer = TextIteratorStreamer(tokenizer )
        generation_kwargs = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
        thread = Thread(target=model.generate , kwargs=generation_kwargs )
        thread.start()
        streamer_text = """"""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text , greedy_text )
    def test_text_streamer_skip_prompt( self ) -> None:
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        model = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer , skip_prompt=True )
            model.generate(input_ids , max_new_tokens=10 , do_sample=False , streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text , new_greedy_text )
    def test_text_streamer_decode_kwargs( self ) -> None:
        tokenizer = AutoTokenizer.from_pretrained("""distilgpt2""" )
        model = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5) , device=torch_device ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer , skip_special_tokens=True )
            model.generate(input_ids , max_new_tokens=1 , do_sample=False , streamer=streamer )
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text , return_tensors="""pt""" )
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
    def test_iterator_streamer_timeout( self ) -> None:
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        model = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        streamer = TextIteratorStreamer(tokenizer , timeout=0.001 )
        generation_kwargs = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
        thread = Thread(target=model.generate , kwargs=generation_kwargs )
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty ):
            streamer_text = """"""
            for new_text in streamer:
                streamer_text += new_text
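# Consumer-side pattern the iterator tests exercise (illustrative sketch):
#
#     streamer = TextIteratorStreamer(tokenizer)
#     Thread(target=model.generate, kwargs={"input_ids": input_ids, "streamer": streamer}).start()
#     for new_text in streamer:
#         print(new_text, end="")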
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
_snake_case = 'config.json'
_snake_case = 'diffusion_pytorch_model.bin'
_snake_case = 'diffusion_flax_model.msgpack'
_snake_case = 'model.onnx'
_snake_case = 'diffusion_pytorch_model.safetensors'
_snake_case = 'weights.pb'
_snake_case = 'https://huggingface.co'
_snake_case = default_cache_path
_snake_case = 'diffusers_modules'
_snake_case = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
_snake_case = ['fp16', 'non-ema']
_snake_case = '.self_attn'
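# Illustrative: on a default setup hf_cache_home resolves to ~/.cache/huggingface,
# so HF_MODULES_CACHE ends up at ~/.cache/huggingface/modules unless the
# environment variable overrides it.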
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class UpperCamelCase ( PretrainedConfig ):
    model_type = '''luke'''
    def __init__( self , vocab_size=50267 , entity_vocab_size=500000 , hidden_size=768 , entity_emb_size=256 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_entity_aware_attention=True , classifier_dropout=None , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
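# Minimal usage sketch (illustrative):
#
#     config = UpperCamelCase(entity_emb_size=128)
#     config.entity_emb_size  # -> 128; all other fields keep their defaults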
"""simple docstring"""
from math import factorial
def binomial_distribution( successes , trials , prob ):
    '''simple docstring'''
    if successes > trials:
        raise ValueError("""successes must be lower or equal to trials""" )
    if trials < 0 or successes < 0:
        raise ValueError("""the function is defined for non-negative integers""" )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError("""the function is defined for non-negative integers""" )
    if not 0 < prob < 1:
        raise ValueError("""prob has to be in range of 1 - 0""" )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
return probability * coefficient
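# Worked example: binomial_distribution(2, 4, 0.75)
#   C(4, 2) = 4! / (2! * 2!) = 6
#   0.75**2 * 0.25**2 = 0.03515625
#   6 * 0.03515625 = 0.2109375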
if __name__ == "__main__":
from doctest import testmod
testmod()
print('Probability of 2 successes out of 4 trails')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys( config , base_model=False ):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = """"""
        else:
            prefix = """vit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
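# Shape sketch (illustrative, hidden_size = 4): the fused qkv weight has shape
# (3 * 4, 4); rows 0-3 become the query projection, rows 4-7 the key projection
# and rows 8-11 the value projection.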
def remove_classification_head_( state_dict ):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head( state_dict ):
    '''simple docstring'''
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    '''simple docstring'''
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors="""pt""" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]] )
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]] )
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , expected_slice , atol=1e-4 )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
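# Example invocation (illustrative paths; the script name is whatever this file
# is saved as):
#
#     python convert_vit_msn_to_pytorch.py \
#         --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#         --pytorch_dump_folder_path ./vit-msn-small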
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring"""
def depth_first_search( grid , row , col , visit ):
    '''simple docstring'''
    row_length , col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )
    visit.remove((row, col) )
    return count
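# Worked example (illustrative): on a fully open 2x2 grid there are exactly two
# simple paths from (0, 0) to (1, 1):
#
#     depth_first_search([[0, 0], [0, 0]], 0, 0, set())  # -> 2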
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'bert_for_seq_generation': (
            'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'bert_for_seq_generation': 512}
class UpperCamelCase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , sep_token="<::::>" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sep_token=sep_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ) -> int:
        return self.sp_model.get_piece_size()
    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self , index ) -> str:
        token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self , tokens ) -> str:
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
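# Minimal usage sketch (illustrative; assumes a local SentencePiece model file):
#
#     tokenizer = UpperCamelCase("spiece.model")
#     ids = tokenizer("Hello world").input_ids
#     text = tokenizer.decode(ids)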
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_snake_case = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
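# --- illustrative sketch, not part of the original file ---
# `_LazyModule` above defers the heavy framework imports until an attribute is
# first accessed. The underlying mechanism, stripped to the standard library
# (PEP 562 module-level __getattr__); transformers uses its own richer helper:
import importlib

_LAZY_ATTRS = {"VisionEncoderDecoderModel": ".modeling_vision_encoder_decoder"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")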
| 324
| 0
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if (ksize % 2) == 0:
_a : Optional[Any] = ksize + 1
_a : Tuple = np.zeros((ksize, ksize) , dtype=np.float64 )
# fill in each value of the kernel
for y in range(ksize ):
for x in range(ksize ):
# distance from center
_a : Union[str, Any] = x - ksize // 2
_a : List[str] = y - ksize // 2
# degree to radiant
_a : Optional[Any] = theta / 1_8_0 * np.pi
_a : List[Any] = np.cos(_theta )
_a : Any = np.sin(_theta )
# get kernel x
_a : Union[str, Any] = cos_theta * px + sin_theta * py
# get kernel y
_a : str = -sin_theta * px + cos_theta * py
# fill kernel
_a : Tuple = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case = imread('../image_data/lena.jpg')
# turn image in gray scale value
_snake_case = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_snake_case = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
_snake_case = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filter2D(gray, CV_8UC3, kernel_aa)
_snake_case = out / out.max() * 255
_snake_case = out.astype(np.uint8)
imshow('Original', gray)
imshow('Gabor filter with 11x11 mask and 6 directions', out)
waitKey(0)
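# --- standalone sanity check, not part of the original file ---
# A Gabor kernel is a plane-wave cosine under a Gaussian envelope, so every
# entry is bounded by the cos() term. This rebuilds the theta=0, gamma=1,
# psi=0 case inline (ksize 10 is bumped to 11 above) rather than calling the
# mangled function name:
ks, sigma_d, lambd = 11, 8.0, 10.0
ys, xs = np.mgrid[-(ks // 2) : ks // 2 + 1, -(ks // 2) : ks // 2 + 1]
k = np.exp(-(xs**2 + ys**2) / (2 * sigma_d**2)) * np.cos(2 * np.pi * xs / lambd)
print(k.shape, float(k.max()), float(k.min()))  # (11, 11), values within [-1, 1]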
| 366
|
"""simple docstring"""
from __future__ import annotations
import time
_snake_case = list[tuple[int, int]]
_snake_case = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_snake_case = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class UpperCamelCase :
def __init__( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Node | None ) -> List[str]:
_a : int = pos_x
_a : Union[str, Any] = pos_y
_a : Tuple = (pos_y, pos_x)
_a : Tuple = goal_x
_a : int = goal_y
_a : str = parent
class UpperCamelCase :
def __init__( self : List[Any] , UpperCAmelCase__ : tuple[int, int] , UpperCAmelCase__ : tuple[int, int] ) -> List[str]:
_a : List[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , UpperCAmelCase__ )
_a : List[str] = Node(goal[1] , goal[0] , goal[1] , goal[0] , UpperCAmelCase__ )
_a : Optional[int] = [self.start]
_a : Tuple = False
def _lowercase ( self : str ) -> Path | None:
while self.node_queue:
_a : Tuple = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
_a : Dict = True
return self.retrace_path(UpperCAmelCase__ )
_a : Tuple = self.get_successors(UpperCAmelCase__ )
for node in successors:
self.node_queue.append(UpperCAmelCase__ )
if not self.reached:
return [self.start.pos]
return None
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Node ) -> list[Node]:
_a : Optional[Any] = []
for action in delta:
_a : str = parent.pos_x + action[1]
_a : List[Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(UpperCAmelCase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(UpperCAmelCase__ , UpperCAmelCase__ , self.target.pos_y , self.target.pos_x , UpperCAmelCase__ ) )
return successors
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Node | None ) -> Path:
_a : Dict = node
_a : List[str] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_a : Any = current_node.parent
path.reverse()
return path
class UpperCamelCase :
def __init__( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] ) -> Any:
_a : Dict = BreadthFirstSearch(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Optional[int] = BreadthFirstSearch(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Dict = False
def _lowercase ( self : Any ) -> Path | None:
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
_a : List[Any] = self.fwd_bfs.node_queue.pop(0 )
_a : Union[str, Any] = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
_a : Optional[int] = True
return self.retrace_bidirectional_path(
UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[str] = current_bwd_node
_a : int = current_fwd_node
_a : Optional[Any] = {
self.fwd_bfs: self.fwd_bfs.get_successors(UpperCAmelCase__ ),
self.bwd_bfs: self.bwd_bfs.get_successors(UpperCAmelCase__ ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(UpperCAmelCase__ )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Node , UpperCAmelCase__ : Node ) -> Path:
_a : str = self.fwd_bfs.retrace_path(UpperCAmelCase__ )
_a : List[Any] = self.bwd_bfs.retrace_path(UpperCAmelCase__ )
bwd_path.pop()
bwd_path.reverse()
_a : Tuple = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
_snake_case = (0, 0)
_snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_snake_case = time.time()
_snake_case = BreadthFirstSearch(init, goal)
_snake_case = bfs.search()
_snake_case = time.time() - start_bfs_time
print('Unidirectional BFS computation time : ', bfs_time)
_snake_case = time.time()
_snake_case = BidirectionalBreadthFirstSearch(init, goal)
_snake_case = bd_bfs.search()
_snake_case = time.time() - start_bd_bfs_time
print('Bidirectional BFS computation time : ', bd_bfs_time)
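# --- design note, not part of the original file ---
# Both searches above dequeue with list.pop(0), which is O(n) per pop. The
# usual O(1) alternative is collections.deque; a minimal sketch:
from collections import deque

queue = deque([(0, 0)])
queue.append((0, 1))       # enqueue a successor
print(queue.popleft())     # O(1) dequeue instead of list.pop(0)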
| 324
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
_snake_case = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
_snake_case = {
'camembert-base': 512,
}
_snake_case = '▁'
class UpperCamelCase ( lowerCamelCase_ ):
UpperCamelCase : str = VOCAB_FILES_NAMES
UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : Optional[int] = ["""input_ids""", """attention_mask"""]
UpperCamelCase : Tuple = CamembertTokenizer
def __init__( self : Any , UpperCAmelCase__ : str=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : int="<s>" , UpperCAmelCase__ : Optional[int]="</s>" , UpperCAmelCase__ : Optional[int]="</s>" , UpperCAmelCase__ : Any="<s>" , UpperCAmelCase__ : List[str]="<unk>" , UpperCAmelCase__ : Dict="<pad>" , UpperCAmelCase__ : str="<mask>" , UpperCAmelCase__ : Any=["<s>NOTUSED", "</s>NOTUSED"] , **UpperCAmelCase__ : Optional[int] , ) -> Dict:
_a : Optional[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
_a : Tuple = vocab_file
_a : Tuple = False if not self.vocab_file else True
def _lowercase ( self : Tuple , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> int:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : List[Any] = [self.cls_token_id]
_a : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase ( self : List[Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> Any:
_a : Union[str, Any] = [self.sep_token_id]
_a : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Optional[Any]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : Dict = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.vocab_file , _UpperCAmelCase )
return (out_vocab_file,)
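# --- illustrative sketch, not part of the original file ---
# The pair layout built above follows the RoBERTa/CamemBERT convention:
#   single sequence:  <s> A </s>      two sequences:  <s> A </s></s> B </s>
# Shown with plain ids; 0 for <s> and 2 for </s> are assumptions used only
# for illustration, not guaranteed CamemBERT vocabulary ids.
cls_id, sep_id = 0, 2
a_ids, b_ids = [10, 11], [20]
print([cls_id] + a_ids + [sep_id] + [sep_id] + b_ids + [sep_id])  # [0, 10, 11, 2, 2, 20, 2]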
| 367
|
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_snake_case = logging.getLogger(__name__)
_snake_case = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_snake_case = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCamelCase :
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
''' scratch.'''
)
} , )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case_ )} , )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class UpperCamelCase :
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={
'''help''': (
'''The input training data files (multiple files in glob format). '''
'''Very often, splitting large files into smaller files can prevent the tokenizer from going out of memory'''
)
} , )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , )
UpperCamelCase : bool = field(
default=snake_case_ , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , )
UpperCamelCase : bool = field(
default=snake_case_ , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} )
UpperCamelCase : bool = field(default=snake_case_ , metadata={'''help''': '''Whether or not to use whole word masking.'''} )
UpperCamelCase : float = field(
default=0.1_5 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
UpperCamelCase : float = field(
default=1 / 6 , metadata={
'''help''': (
'''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
''' modeling.'''
)
} , )
UpperCamelCase : int = field(
default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} )
UpperCamelCase : int = field(
default=-1 , metadata={
'''help''': (
'''Optional input sequence length after tokenization.'''
'''The training dataset will be truncated in block of this size for training.'''
'''Default to the model max input length for single sentence inputs (take into account special tokens).'''
)
} , )
UpperCamelCase : bool = field(
default=snake_case_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = None , ):
'''simple docstring'''
def _dataset(UpperCamelCase__ , UpperCamelCase__=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("""You need to set both whole_word_mask and mlm to True for Chinese Whole Word Mask""" )
return LineByLineWithRefDataset(
tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size , ref_path=UpperCamelCase__ , )
return LineByLineTextDataset(tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=UpperCamelCase__ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(UpperCamelCase__ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def lowerCAmelCase__ ( ):
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_a : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_a , _a , _a : List[str] = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , UpperCamelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
_a : str = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_a : Union[str, Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
_a : str = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.tokenizer_name:
_a : List[str] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_a : Union[str, Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
""" script, save it, and load it from here, using --tokenizer_name.""" )
if model_args.model_name_or_path:
_a : Optional[Any] = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , )
else:
logger.info("""Training new model from scratch""" )
_a : List[Any] = AutoModelWithLMHead.from_config(UpperCamelCase__ )
model.resize_token_embeddings(len(UpperCamelCase__ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
if data_args.block_size <= 0:
_a : int = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
_a : Optional[Any] = min(data_args.block_size , tokenizer.max_len )
# Get datasets
_a : Optional[Any] = (
get_dataset(UpperCamelCase__ , tokenizer=UpperCamelCase__ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
_a : Optional[int] = (
get_dataset(UpperCamelCase__ , tokenizer=UpperCamelCase__ , evaluate=UpperCamelCase__ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
_a : Any = DataCollatorForPermutationLanguageModeling(
tokenizer=UpperCamelCase__ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
_a : Union[str, Any] = DataCollatorForWholeWordMask(
tokenizer=UpperCamelCase__ , mlm_probability=data_args.mlm_probability )
else:
_a : str = DataCollatorForLanguageModeling(
tokenizer=UpperCamelCase__ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_a : Union[str, Any] = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , data_collator=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , prediction_loss_only=UpperCamelCase__ , )
# Training
if training_args.do_train:
_a : Optional[Any] = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=UpperCamelCase__ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_a : Union[str, Any] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_a : int = trainer.evaluate()
_a : Dict = math.exp(eval_output["""eval_loss"""] )
_a : Union[str, Any] = {"""perplexity""": perplexity}
_a : Optional[Any] = os.path.join(training_args.output_dir , """eval_results_lm.txt""" )
if trainer.is_world_master():
with open(UpperCamelCase__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , UpperCamelCase__ , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(UpperCamelCase__ )
return results
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
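# --- illustrative sketch, not part of the original file ---
# The evaluation block above reports perplexity as exp(mean eval loss); the
# loss value here is made up purely to show the relationship.
import math

eval_loss = 2.3  # hypothetical mean cross-entropy from trainer.evaluate()
print(math.exp(eval_loss))  # ~9.97, the corresponding perplexity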
| 324
| 0
|
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
_snake_case = 637_8137.0
_snake_case = 635_6752.31_4245
_snake_case = 637_8137
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Tuple = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
_a : Dict = atan((1 - flattening) * tan(radians(UpperCamelCase__ ) ) )
_a : int = atan((1 - flattening) * tan(radians(UpperCamelCase__ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
_a : Tuple = haversine_distance(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
_a : List[str] = (b_lata + b_lata) / 2
_a : Tuple = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma / 2)
_a : str = (sin(UpperCamelCase__ ) ** 2) * (cos(UpperCamelCase__ ) ** 2)
_a : Tuple = cos(sigma / 2 ) ** 2
_a : Optional[int] = (sigma - sin(UpperCamelCase__ )) * (x_numerator / x_denominator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma / 2)
_a : Union[str, Any] = (cos(UpperCamelCase__ ) ** 2) * (sin(UpperCamelCase__ ) ** 2)
_a : List[str] = sin(sigma / 2 ) ** 2
_a : str = (sigma + sin(UpperCamelCase__ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
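# --- illustrative sketch, not part of the original file ---
# The central angle sigma above comes from the haversine formula. A
# self-contained spherical version for comparison (no flattening correction,
# so it lands close to, but not exactly on, the ellipsoidal result above):
from math import asin, sqrt

def haversine_sketch(lat1, lon1, lat2, lon2, radius=6378137.0):
    d_lat, d_lon = radians(lat2 - lat1), radians(lon2 - lon1)
    h = sin(d_lat / 2) ** 2 + cos(radians(lat1)) * cos(radians(lat2)) * sin(d_lon / 2) ** 2
    return 2 * radius * asin(sqrt(h))

print(haversine_sketch(37.774856, -122.424227, 40.713019, -74.012647))  # SF -> NYC, ~4.13e6 m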
| 368
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_snake_case = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
_a : Optional[Any] = k.replace(UpperCamelCase__ , UpperCamelCase__ )
return k
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Union[str, Any] = DEFAULTS.copy()
cfg_kwargs.update(UpperCamelCase__ )
_a : Optional[Any] = PegasusConfig(**UpperCamelCase__ )
_a : Tuple = PegasusForConditionalGeneration(UpperCamelCase__ )
_a : str = torch_model.model.state_dict()
_a : Union[str, Any] = {}
for k, v in tf_weights.items():
_a : Any = rename_state_dict_key(UpperCamelCase__ )
if new_k not in sd:
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
_a : str = v.T
_a : int = torch.tensor(UpperCamelCase__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
_a : Union[str, Any] = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
_a : str = mapping["""shared.weight"""]
_a : Union[str, Any] = mapping["""shared.weight"""]
_a : Optional[Any] = {k: torch.zeros_like(UpperCamelCase__ ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**UpperCamelCase__ )
_a , _a : int = torch_model.model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
_a : Optional[Any] = [
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], F"""no matches found for the following tf keys {extra}"""
return torch_model
def lowerCAmelCase__ ( UpperCamelCase__="./ckpt/aeslc/model.ckpt-32000" ):
'''simple docstring'''
_a : List[Any] = tf.train.list_variables(UpperCamelCase__ )
_a : Optional[int] = {}
_a : Dict = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(UpperCamelCase__ , desc="""converting tf checkpoint to dict""" ):
_a : Optional[Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
_a : str = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
_a : int = array
return tf_weights
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
# save tokenizer first
_a : Dict = Path(UpperCamelCase__ ).parent.name
_a : Optional[Any] = task_specific_params[F"""summarization_{dataset}"""]["""max_position_embeddings"""]
_a : Tuple = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=UpperCamelCase__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(UpperCamelCase__ )
# convert model
_a : List[Any] = get_tf_weights_as_numpy(UpperCamelCase__ )
_a : Dict = task_specific_params[F"""summarization_{dataset}"""]
if dataset == "large":
_a : Tuple = task_specific_params
_a : Optional[int] = convert_pegasus(UpperCamelCase__ , UpperCamelCase__ )
torch_model.save_pretrained(UpperCamelCase__ )
_a : Dict = torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(UpperCamelCase__ , Path(UpperCamelCase__ ) / """pytorch_model.bin""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
_snake_case = parser.parse_args()
if args.save_dir is None:
_snake_case = Path(args.tf_ckpt_path).parent.name
_snake_case = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
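# --- illustrative sketch, not part of the original file ---
# The key-renaming step above is ordered string substitution over a patterns
# table; shown here on one made-up TF-style key with a subset of PATTERNS:
_DEMO_PATTERNS = [["/", "."], [".LayerNorm.gamma", "_layer_norm.weight"], ["kernel", "weight"]]

def demo_rename(key):
    for old, new in _DEMO_PATTERNS:
        key = key.replace(old, new)
    return key

print(demo_rename("encoder/layer_0/kernel"))  # -> encoder.layer_0.weight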
| 324
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {'configuration_timm_backbone': ['TimmBackboneConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['TimmBackbone']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 369
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImg2ImgPipeline,
UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
UpperCamelCase : Union[str, Any] = StableDiffusionXLImg2ImgPipeline
UpperCamelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
UpperCamelCase : Tuple = PipelineTesterMixin.required_optional_params - {'''latents'''}
UpperCamelCase : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowercase ( self : Any ) -> List[Any]:
torch.manual_seed(0 )
_a : str = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase__ , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
_a : Union[str, Any] = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
_a : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_a : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=32 , )
_a : Tuple = CLIPTextModel(UpperCAmelCase__ )
_a : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=UpperCAmelCase__ )
_a : Dict = CLIPTextModelWithProjection(UpperCAmelCase__ )
_a : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=UpperCAmelCase__ )
_a : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=0 ) -> int:
_a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
_a : Any = image / 2 + 0.5
if str(UpperCAmelCase__ ).startswith("""mps""" ):
_a : Any = torch.manual_seed(UpperCAmelCase__ )
else:
_a : Tuple = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
_a : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.7_5,
}
return inputs
def _lowercase ( self : Any ) -> List[Any]:
_a : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_a : Dict = self.get_dummy_components()
_a : List[Any] = StableDiffusionXLImg2ImgPipeline(**UpperCAmelCase__ )
_a : Union[str, Any] = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : List[str] = self.get_dummy_inputs(UpperCAmelCase__ )
_a : List[str] = sd_pipe(**UpperCAmelCase__ ).images
_a : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a : List[str] = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase ( self : Any ) -> Any:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def _lowercase ( self : List[Any] ) -> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def _lowercase ( self : Any ) -> Any:
pass
def _lowercase ( self : Tuple ) -> Union[str, Any]:
_a : int = self.get_dummy_components()
_a : Any = StableDiffusionXLImg2ImgPipeline(**UpperCAmelCase__ )
_a : Dict = sd_pipe.to(UpperCAmelCase__ )
_a : List[str] = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
# forward without prompt embeds
_a : int = self.get_dummy_inputs(UpperCAmelCase__ )
_a : List[str] = 3 * ["""this is a negative prompt"""]
_a : Dict = negative_prompt
_a : Dict = 3 * [inputs["""prompt"""]]
_a : Optional[Any] = sd_pipe(**UpperCAmelCase__ )
_a : Tuple = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
_a : int = self.get_dummy_inputs(UpperCAmelCase__ )
_a : Union[str, Any] = 3 * ["""this is a negative prompt"""]
_a : int = 3 * [inputs.pop("""prompt""" )]
_a , _a , _a , _a : List[str] = sd_pipe.encode_prompt(UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ )
_a : Tuple = sd_pipe(
**UpperCAmelCase__ , prompt_embeds=UpperCAmelCase__ , negative_prompt_embeds=UpperCAmelCase__ , pooled_prompt_embeds=UpperCAmelCase__ , negative_pooled_prompt_embeds=UpperCAmelCase__ , )
_a : Dict = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : List[str] ) -> Union[str, Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : str="cpu" , UpperCAmelCase__ : str=torch.floataa , UpperCAmelCase__ : List[Any]=0 ) -> List[str]:
_a : List[str] = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
_a : Union[str, Any] = np.random.RandomState(UpperCAmelCase__ ).standard_normal((1, 4, 64, 64) )
_a : List[Any] = torch.from_numpy(UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
_a : Any = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _lowercase ( self : int ) -> Union[str, Any]:
_a : Union[str, Any] = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : List[str] = self.get_inputs(UpperCAmelCase__ )
_a : Tuple = pipe(**UpperCAmelCase__ ).images
_a : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_a : int = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
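# --- illustrative sketch, not part of the original file ---
# The fixtures above pin randomness per device so output slices stay
# comparable across runs; the core of that trick in isolation:
g = torch.Generator(device="cpu").manual_seed(0)
a = torch.randn(3, generator=g)
g = torch.Generator(device="cpu").manual_seed(0)
b = torch.randn(3, generator=g)
print(torch.equal(a, b))  # True: same seed and generator device -> same noise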
| 324
| 0
|
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
_a : Optional[int] = torch.exp(_lowerCamelCase )
_a : Dict = torch.sum(_lowerCamelCase , dim=1 ) # sum of exp(x_i)
_a : List[Any] = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_lowerCamelCase ) - B / A
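# --- illustrative sketch, not part of the original file ---
# The helper above computes the Shannon entropy of softmax(x) as
# logsumexp(x) - E[x]; a numerically safer equivalent uses log_softmax, which
# avoids overflowing exp() for large logits:
def entropy_stable(x):
    log_p = torch.nn.functional.log_softmax(x, dim=1)  # log of softmax, computed stably
    return -(log_p.exp() * log_p).sum(dim=1)           # -sum p * log p per row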
class UpperCamelCase ( nn.Module ):
def __init__( self : List[str] , UpperCAmelCase__ : str ) -> Optional[Any]:
super().__init__()
_a : Tuple = config.output_attentions
_a : List[Any] = config.output_hidden_states
_a : str = nn.ModuleList([BertLayer(UpperCAmelCase__ ) for _ in range(config.num_hidden_layers )] )
_a : Any = nn.ModuleList([BertHighway(UpperCAmelCase__ ) for _ in range(config.num_hidden_layers )] )
_a : int = [-1 for _ in range(config.num_hidden_layers )]
def _lowercase ( self : str , UpperCAmelCase__ : int ) -> Optional[int]:
if (type(UpperCAmelCase__ ) is float) or (type(UpperCAmelCase__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_a : int = x
else:
_a : Union[str, Any] = x
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Union[str, Any] ) -> Optional[Any]:
_a : int = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def _lowercase ( self : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : str=None , UpperCAmelCase__ : str=None , ) -> List[str]:
_a : str = ()
_a : List[str] = ()
_a : Tuple = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_a : List[Any] = all_hidden_states + (hidden_states,)
_a : str = layer_module(
UpperCAmelCase__ , UpperCAmelCase__ , head_mask[i] , UpperCAmelCase__ , UpperCAmelCase__ )
_a : str = layer_outputs[0]
if self.output_attentions:
_a : Dict = all_attentions + (layer_outputs[1],)
_a : int = (hidden_states,)
if self.output_hidden_states:
_a : Dict = current_outputs + (all_hidden_states,)
if self.output_attentions:
_a : Union[str, Any] = current_outputs + (all_attentions,)
_a : Optional[int] = self.highway[i](UpperCAmelCase__ )
# logits, pooled_output
if not self.training:
_a : Dict = highway_exit[0]
_a : str = entropy(UpperCAmelCase__ )
_a : List[Any] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_a : str = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_a : Any = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(UpperCAmelCase__ , i + 1 )
else:
_a : Optional[Any] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_a : str = all_hidden_states + (hidden_states,)
_a : Union[str, Any] = (hidden_states,)
if self.output_hidden_states:
_a : Any = outputs + (all_hidden_states,)
if self.output_attentions:
_a : Tuple = outputs + (all_attentions,)
_a : Any = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'''The Bert Model transformer with early exiting (DeeBERT). ''' , a__ , )
class UpperCamelCase ( a__ ):
def __init__( self : int , UpperCAmelCase__ : Dict ) -> Optional[int]:
super().__init__(UpperCAmelCase__ )
_a : Optional[int] = config
_a : Dict = BertEmbeddings(UpperCAmelCase__ )
_a : Any = DeeBertEncoder(UpperCAmelCase__ )
_a : List[Any] = BertPooler(UpperCAmelCase__ )
self.init_weights()
def _lowercase ( self : Optional[Any] ) -> List[Any]:
self.encoder.init_highway_pooler(self.pooler )
def _lowercase ( self : List[str] ) -> Tuple:
return self.embeddings.word_embeddings
def _lowercase ( self : Dict , UpperCAmelCase__ : Tuple ) -> Dict:
_a : Union[str, Any] = value
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : Any ) -> Any:
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(UpperCAmelCase__ )
@add_start_docstrings_to_model_forward(UpperCAmelCase__ )
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Union[str, Any]=None , ) -> int:
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
_a : Optional[int] = input_ids.size()
elif inputs_embeds is not None:
_a : str = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
_a : Tuple = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_a : Optional[Any] = torch.ones(UpperCAmelCase__ , device=UpperCAmelCase__ )
if encoder_attention_mask is None:
_a : Optional[int] = torch.ones(UpperCAmelCase__ , device=UpperCAmelCase__ )
if token_type_ids is None:
_a : Dict = torch.zeros(UpperCAmelCase__ , dtype=torch.long , device=UpperCAmelCase__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_a : torch.Tensor = self.get_extended_attention_mask(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_a : Optional[int] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_a : Optional[Any] = encoder_attention_mask[:, None, None, :]
_a : List[Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_a : Dict = (1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_a : int = self.get_head_mask(UpperCAmelCase__ , self.config.num_hidden_layers )
_a : str = self.embeddings(
input_ids=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , inputs_embeds=UpperCAmelCase__ )
_a : Optional[Any] = self.encoder(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
_a : str = encoder_outputs[0]
_a : Tuple = self.pooler(UpperCAmelCase__ )
_a : Tuple = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class UpperCamelCase ( a__ ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> Optional[Any]:
_a : str = message
_a : Dict = exit_layer # start from 1!
class UpperCamelCase ( nn.Module ):
def __init__( self : str , UpperCAmelCase__ : int ) -> Tuple:
super().__init__()
_a : Union[str, Any] = BertPooler(UpperCAmelCase__ )
_a : Tuple = nn.Dropout(config.hidden_dropout_prob )
_a : str = nn.Linear(config.hidden_size , config.num_labels )
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : Optional[int] ) -> Dict:
# Pooler
_a : Optional[Any] = encoder_outputs[0]
_a : int = self.pooler(UpperCAmelCase__ )
# "return" pooler_output
# BertModel
_a : Union[str, Any] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_a : Union[str, Any] = bmodel_output[1]
_a : int = self.dropout(UpperCAmelCase__ )
_a : Dict = self.classifier(UpperCAmelCase__ )
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. ''' , a__ , )
class UpperCamelCase ( a__ ):
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> Union[str, Any]:
super().__init__(UpperCAmelCase__ )
_a : str = config.num_labels
_a : List[Any] = config.num_hidden_layers
_a : Union[str, Any] = DeeBertModel(UpperCAmelCase__ )
_a : List[Any] = nn.Dropout(config.hidden_dropout_prob )
_a : str = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCAmelCase__ )
def _lowercase ( self : List[str] , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : int=-1 , UpperCAmelCase__ : Optional[int]=False , ) -> List[Any]:
_a : int = self.num_layers
try:
_a : Optional[Any] = self.bert(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , inputs_embeds=UpperCAmelCase__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_a : Any = outputs[1]
_a : Tuple = self.dropout(UpperCAmelCase__ )
_a : Any = self.classifier(UpperCAmelCase__ )
_a : Dict = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_a : List[Any] = e.message
_a : Optional[int] = e.exit_layer
_a : Tuple = outputs[0]
if not self.training:
_a : Optional[int] = entropy(UpperCAmelCase__ )
_a : Any = []
_a : Optional[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_a : Union[str, Any] = MSELoss()
_a : Union[str, Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_a : Any = CrossEntropyLoss()
_a : Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_a : int = []
for highway_exit in outputs[-1]:
_a : List[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCAmelCase__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_a : int = MSELoss()
_a : Any = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_a : Dict = CrossEntropyLoss()
_a : List[str] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCAmelCase__ )
if train_highway:
_a : Union[str, Any] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_a : Any = (loss,) + outputs
if not self.training:
_a : Any = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_a : int = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 370
|
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger()
@dataclass
class UpperCamelCase :
UpperCamelCase : nn.Module
UpperCamelCase : List[nn.Module] = field(default_factory=snake_case_ )
UpperCamelCase : list = field(default_factory=snake_case_ )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tensor , UpperCAmelCase__ : Tensor ) -> Any:
_a : int = len(list(m.modules() ) ) == 1 or isinstance(UpperCAmelCase__ , nn.Conv2d ) or isinstance(UpperCAmelCase__ , nn.BatchNorm2d )
if has_not_submodules:
self.traced.append(UpperCAmelCase__ )
def __call__( self : Tuple , UpperCAmelCase__ : Tensor ) -> Tuple:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(UpperCAmelCase__ )
[x.remove() for x in self.handles]
return self
@property
def _lowercase ( self : Optional[int] ) -> int:
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class UpperCamelCase :
UpperCamelCase : nn.Module
UpperCamelCase : nn.Module
UpperCamelCase : int = 0
UpperCamelCase : List = field(default_factory=snake_case_ )
UpperCamelCase : List = field(default_factory=snake_case_ )
def __call__( self : Optional[Any] , UpperCAmelCase__ : Tensor ) -> Tuple:
_a : Union[str, Any] = Tracker(self.dest )(UpperCAmelCase__ ).parametrized
_a : List[Any] = Tracker(self.src )(UpperCAmelCase__ ).parametrized
_a : Tuple = list(filter(lambda UpperCAmelCase__ : type(UpperCAmelCase__ ) not in self.src_skip , UpperCAmelCase__ ) )
_a : Union[str, Any] = list(filter(lambda UpperCAmelCase__ : type(UpperCAmelCase__ ) not in self.dest_skip , UpperCAmelCase__ ) )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise Exception(
f"""Numbers of operations are different. Source module has {len(UpperCAmelCase__ )} operations while"""
f""" destination module has {len(UpperCAmelCase__ )}.""" )
for dest_m, src_m in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"""Transfered from={src_m} to={dest_m}""" )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = True ):
'''simple docstring'''
print(F"""Converting {name}...""" )
with torch.no_grad():
_a : List[str] = timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ ).eval()
_a : str = ResNetForImageClassification(UpperCamelCase__ ).eval()
_a : List[str] = ModuleTransfer(src=UpperCamelCase__ , dest=UpperCamelCase__ )
_a : List[str] = torch.randn((1, 3, 2_2_4, 2_2_4) )
module_transfer(UpperCamelCase__ )
assert torch.allclose(from_model(UpperCamelCase__ ) , our_model(UpperCamelCase__ ).logits ), "The model logits don't match the original one."
_a : Dict = F"""resnet{'-'.join(name.split('resnet' ) )}"""
print(UpperCamelCase__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add model""" , use_temp_dir=UpperCamelCase__ , )
# we can use the convnext one
_a : Optional[Any] = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add image processor""" , use_temp_dir=UpperCamelCase__ , )
print(F"""Pushed {checkpoint_name}""" )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True ):
'''simple docstring'''
_a : Any = """imagenet-1k-id2label.json"""
_a : Optional[int] = 1_0_0_0
_a : Any = (1, num_labels)
_a : Union[str, Any] = """huggingface/label-files"""
_a : Tuple = num_labels
_a : Optional[int] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
_a : Optional[Any] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_a : Any = idalabel
_a : Tuple = {v: k for k, v in idalabel.items()}
_a : List[str] = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ )
_a : Union[str, Any] = {
"""resnet18""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type="""basic""" ),
"""resnet26""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
"""resnet34""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type="""basic""" ),
"""resnet50""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
"""resnet101""": ImageNetPreTrainedConfig(
depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
"""resnet152""": ImageNetPreTrainedConfig(
depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
}
if model_name:
convert_weight_and_push(UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return config, expected_shape
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
_snake_case = parser.parse_args()
_snake_case = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 324
| 0
|
from numpy import exp, pi, sqrt
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ = 0.0 , UpperCamelCase__ = 1.0 ):
'''simple docstring'''
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
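# --- numerical sanity check, not part of the original file ---
# The density above should integrate to ~1; checked here for the standard
# normal (mu=0, sigma=1) with a trapezoidal rule over a wide interval:
import numpy as np

xs = np.linspace(-10.0, 10.0, 10_001)
ys = 1 / np.sqrt(2 * np.pi) * np.exp(-(xs**2) / 2)
print(np.trapz(ys, xs))  # ~1.0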
| 371
|
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 324
| 0
|
"""simple docstring"""
import logging
import os
from .state import PartialState
class UpperCamelCase ( logging.LoggerAdapter ):
@staticmethod
def _lowercase ( UpperCAmelCase__ : str ) -> Union[str, Any]:
_a : List[str] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def _lowercase ( self : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : List[str] ) -> Union[str, Any]:
if PartialState._shared_state == {}:
raise RuntimeError(
"""You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" )
_a : Dict = kwargs.pop("""main_process_only""" , UpperCAmelCase__ )
_a : str = kwargs.pop("""in_order""" , UpperCAmelCase__ )
if self.isEnabledFor(UpperCAmelCase__ ):
if self._should_log(UpperCAmelCase__ ):
_a , _a : List[Any] = self.process(UpperCAmelCase__ , UpperCAmelCase__ )
self.logger.log(UpperCAmelCase__ , UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ )
elif in_order:
_a : Dict = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_a , _a : Optional[int] = self.process(UpperCAmelCase__ , UpperCAmelCase__ )
self.logger.log(UpperCAmelCase__ , UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ )
state.wait_for_everyone()
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ = None ):
'''simple docstring'''
if log_level is None:
_a : List[Any] = os.environ.get("""ACCELERATE_LOG_LEVEL""" , lowerCamelCase__ )
_a : int = logging.getLogger(lowerCamelCase__ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(lowerCamelCase__ , {} )
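# Minimal usage sketch (an assumption for illustration; in the un-obfuscated
# original this factory is `get_logger` and the override above is `log`):
#   logger = lowerCAmelCase__(__name__, log_level="INFO")
#   logger.info("hello", main_process_only=True)  # requires Accelerator()/PartialState() first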
| 350
|
"""simple docstring"""
_snake_case = 8.31_44_62 # Unit - J mol-1 K-1
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
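# Note: both helpers above share the obfuscated name `lowerCAmelCase__`, so the
# second definition (solving for volume) shadows the first (solving for
# pressure) at import time. Illustrative check: 1 mol at 273.15 K and 101325 Pa
# occupies 1 * 273.15 * 8.314462 / 101325 ≈ 0.0224 m^3 (22.4 L, the classic
# molar volume at STP).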
if __name__ == "__main__":
from doctest import testmod
testmod()
| 324
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_snake_case = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
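# Outside of static type checking, the module object is replaced by a
# `_LazyModule` that resolves the names listed in `_import_structure` only on
# first attribute access, keeping `import` cheap when torch/vision are absent.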
| 351
|
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_snake_case = logging.getLogger(__name__)
_snake_case = 'pytorch_model.bin'
@dataclasses.dataclass
class UpperCamelCase :
UpperCamelCase : str = dataclasses.field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co.'''} , )
@dataclasses.dataclass
class UpperCamelCase :
UpperCamelCase : str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the training data.'''} )
UpperCamelCase : str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the data to predict on.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''The name of the task to train on.'''} , )
UpperCamelCase : Optional[List[str]] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''The list of labels for the task.'''} )
@dataclasses.dataclass
class UpperCamelCase :
UpperCamelCase : str = dataclasses.field(
metadata={'''help''': '''The output directory where the model predictions and checkpoints will be written.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default='''accuracy''' , metadata={'''help''': '''The evaluation metric used for the task.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default='''no''' , metadata={
        '''help''': '''The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'''
} , )
UpperCamelCase : Optional[int] = dataclasses.field(
default=10 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , )
UpperCamelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={
'''help''': '''How much the specified evaluation metric must improve to satisfy early stopping conditions.'''
} , )
UpperCamelCase : Optional[bool] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the confidence score.'''} , )
UpperCamelCase : Optional[bool] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the validation performance.'''} , )
UpperCamelCase : Optional[bool] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Whether to fine-tune on labeled data after pseudo training.'''} , )
UpperCamelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={'''help''': '''Confidence threshold for pseudo-labeled data filtering.'''} , )
UpperCamelCase : Optional[int] = dataclasses.field(
        default=100 , metadata={'''help''': '''Maximum number of self-training iterations to run.'''} , )
UpperCamelCase : Optional[int] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Random seed for initialization.'''} , )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Optional[int] = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
_a : Union[str, Any] = dataset.filter(lambda UpperCamelCase__ : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
_a : Any = int(eval_result * len(UpperCamelCase__ ) )
print(UpperCamelCase__ )
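        # Sort by model confidence (highest first) and keep only the top
        # `num_samples` pseudo-labeled examples.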
_a : str = dataset.sort("""probability""" , reverse=UpperCamelCase__ )
_a : Any = dataset.select(range(UpperCamelCase__ ) )
_a : Tuple = dataset.remove_columns(["""label""", """probability"""] )
_a : Optional[Any] = dataset.rename_column("""prediction""" , """label""" )
_a : Dict = dataset.map(lambda UpperCamelCase__ : {"label": idalabel[example["label"]]} )
_a : Union[str, Any] = dataset.shuffle(seed=args.seed )
_a : Optional[int] = os.path.join(UpperCamelCase__ , F"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(UpperCamelCase__ , index=UpperCamelCase__ )
else:
dataset.to_json(UpperCamelCase__ )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
_a : Optional[int] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
_a : Dict = STModelArguments(model_name_or_path=UpperCamelCase__ )
_a : Union[str, Any] = STDataArguments(train_file=UpperCamelCase__ , infer_file=UpperCamelCase__ )
_a : Any = STTrainingArguments(output_dir=UpperCamelCase__ )
_a : Any = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(UpperCamelCase__ ).items():
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for key, value in kwargs.items():
if hasattr(UpperCamelCase__ , UpperCamelCase__ ):
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Sanity checks
_a : Union[str, Any] = {}
_a : Tuple = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
_a : int = args.train_file
_a : List[Any] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
_a : Union[str, Any] = args.eval_file
for key in data_files:
_a : Optional[Any] = data_files[key].split(""".""" )[-1]
assert extension in ["csv", "json"], F"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
_a : str = extension
else:
assert extension == args.data_file_extension, F"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), F"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("""Creating the initial data directory for self-training...""" )
_a : Tuple = F"""{args.output_dir}/self-train_iter-{{}}""".format
_a : Dict = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=UpperCamelCase__ )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
accelerator.wait_for_everyone()
_a : str = None
_a : int = None
_a : str = 0
_a : List[Any] = False
# Show the progress bar
_a : List[Any] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_a : Union[str, Any] = data_dir_format(UpperCamelCase__ )
assert os.path.exists(UpperCamelCase__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_a : str = os.path.join(UpperCamelCase__ , """stage-1""" )
_a : Tuple = {
"""accelerator""": accelerator,
"""model_name_or_path""": args.model_name_or_path,
"""cache_dir""": args.cache_dir,
"""do_train""": True,
"""train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
"""do_eval""": True if args.eval_file is not None else False,
"""eval_file""": data_files["""eval"""],
"""do_predict""": True,
"""infer_file""": data_files["""infer"""],
"""task_name""": args.task_name,
"""label_list""": args.label_list,
"""output_dir""": current_output_dir,
"""eval_metric""": args.eval_metric,
"""evaluation_strategy""": args.evaluation_strategy,
"""early_stopping_patience""": args.early_stopping_patience,
"""early_stopping_threshold""": args.early_stopping_threshold,
"""seed""": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
arguments_dict.update({key: value} )
_a : int = os.path.join(UpperCamelCase__ , """best-checkpoint""" , UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , UpperCamelCase__ , UpperCamelCase__ , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , UpperCamelCase__ )
finetune(**UpperCamelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(UpperCamelCase__ )
logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , UpperCamelCase__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_a : Dict = os.path.join(UpperCamelCase__ , """best-checkpoint""" )
_a : List[str] = os.path.join(UpperCamelCase__ , """stage-2""" )
# Update arguments_dict
_a : int = model_path
_a : Dict = data_files["""train"""]
_a : int = current_output_dir
_a : Any = os.path.join(UpperCamelCase__ , """best-checkpoint""" , UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""" , UpperCamelCase__ , UpperCamelCase__ , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , UpperCamelCase__ )
finetune(**UpperCamelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(UpperCamelCase__ )
logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , UpperCamelCase__ )
_a : List[Any] = iteration
_a : int = data_dir_format(iteration + 1 )
_a : Dict = AutoConfig.from_pretrained(os.path.join(UpperCamelCase__ , """best-checkpoint""" ) )
_a : Union[str, Any] = config.idalabel
_a : Any = os.path.join(UpperCamelCase__ , """eval_results_best-checkpoint.json""" )
_a : Any = os.path.join(UpperCamelCase__ , """test_results_best-checkpoint.json""" )
assert os.path.exists(UpperCamelCase__ )
with open(UpperCamelCase__ , """r""" ) as f:
_a : Tuple = float(json.load(UpperCamelCase__ )[args.eval_metric] )
_a : Dict = os.path.join(UpperCamelCase__ , """infer_output_best-checkpoint.csv""" )
assert os.path.exists(UpperCamelCase__ )
# Loading the dataset from local csv or json files.
_a : List[Any] = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""]
_a : Any = load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""]
if accelerator.is_main_process:
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
shutil.copy(UpperCamelCase__ , os.path.join(UpperCamelCase__ , F"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(UpperCamelCase__ ):
shutil.copy(UpperCamelCase__ , os.path.join(UpperCamelCase__ , F"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
accelerator.wait_for_everyone()
_a : List[str] = os.path.join(UpperCamelCase__ , F"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_a : Any = eval_result
if best_iteration is None:
_a : Union[str, Any] = new_iteration
_a : str = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_a : Union[str, Any] = new_iteration
_a : List[str] = new_eval_result
_a : Optional[Any] = 0
else:
if new_eval_result == best_eval_result:
_a : Tuple = new_iteration
_a : List[Any] = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_a : Union[str, Any] = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("""Best iteration: %d""" , UpperCamelCase__ )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , UpperCamelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(UpperCamelCase__ , F"""eval_results_iter-{iteration}.json""" ) , os.path.join(UpperCamelCase__ , """eval_results_best-iteration.json""" ) , )
else:
# Assume that the last iteration is the best
logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , UpperCamelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(UpperCamelCase__ , F"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(UpperCamelCase__ , """eval_results_best-iteration.json""" ) , )
| 324
| 0
|
"""simple docstring"""
from jiwer import compute_measures
import datasets
_snake_case = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_snake_case = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_snake_case = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def _lowercase ( self : Optional[int] ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def _lowercase ( self : Tuple , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Optional[int]=False ) -> Union[str, Any]:
if concatenate_texts:
return compute_measures(UpperCAmelCase__ , UpperCAmelCase__ )["wer"]
else:
_a : Any = 0
_a : Any = 0
for prediction, reference in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
_a : Optional[int] = compute_measures(UpperCAmelCase__ , UpperCAmelCase__ )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
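# Note: in the iterative branch, `incorrect` accumulates S + D + I while
# `total` accumulates S + D + C (i.e. the reference length N), so the returned
# ratio matches the WER = (S + D + I) / N formula quoted in the module docstring.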
| 352
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
_snake_case = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
_snake_case = {
'camembert-base': 512,
}
_snake_case = '▁'
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : Any = VOCAB_FILES_NAMES
UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : Dict = ['''input_ids''', '''attention_mask''']
UpperCamelCase : Optional[Any] = CamembertTokenizer
def __init__( self : int , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Optional[Any]="<s>" , UpperCAmelCase__ : Optional[int]="</s>" , UpperCAmelCase__ : Tuple="</s>" , UpperCAmelCase__ : Tuple="<s>" , UpperCAmelCase__ : Tuple="<unk>" , UpperCAmelCase__ : Tuple="<pad>" , UpperCAmelCase__ : int="<mask>" , UpperCAmelCase__ : Optional[int]=["<s>NOTUSED", "</s>NOTUSED"] , **UpperCAmelCase__ : Optional[Any] , ) -> Union[str, Any]:
        # Mask token behaves like a normal word, i.e. it includes the space before it
_a : List[Any] = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token
super().__init__(
UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , )
_a : int = vocab_file
_a : int = False if not self.vocab_file else True
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : List[Any] = [self.cls_token_id]
_a : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
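    # The sequences built above follow the RoBERTa/CamemBERT convention:
    # single sequence: `<s> A </s>`; sequence pair: `<s> A </s></s> B </s>`.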
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
_a : Union[str, Any] = [self.sep_token_id]
_a : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
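    # CamemBERT, like RoBERTa, does not make use of token type ids, so the
    # returned mask is all zeros whether or not a second sequence is passed.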
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : List[str] = os.path.join(
UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ):
copyfile(self.vocab_file , UpperCAmelCase__ )
return (out_vocab_file,)
| 324
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 353
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case = logging.get_logger(__name__)
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : Dict = ['''pixel_values''']
def __init__( self : Any , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : Optional[Any]=PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : List[str] , ) -> None:
_a : int = do_resize
_a : Union[str, Any] = do_rescale
_a : Any = size_divisor
_a : Any = resample
super().__init__(**UpperCAmelCase__ )
def _lowercase ( self : Tuple , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[ChannelDimension] = None , **UpperCAmelCase__ : Optional[Any] ) -> np.ndarray:
_a , _a : Tuple = get_image_size(UpperCAmelCase__ )
# Rounds the height and width down to the closest multiple of size_divisor
_a : Optional[Any] = height // size_divisor * size_divisor
_a : Union[str, Any] = width // size_divisor * size_divisor
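        # e.g. height=50, width=70 with size_divisor=32 yields (new_h, new_w) = (32, 64)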
_a : Any = resize(UpperCAmelCase__ , (new_h, new_w) , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
return image
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : float , UpperCAmelCase__ : Optional[ChannelDimension] = None , **UpperCAmelCase__ : Optional[int] ) -> np.ndarray:
return rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[Union[TensorType, str]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : int , ) -> BatchFeature:
_a : Dict = do_resize if do_resize is not None else self.do_resize
_a : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
_a : str = size_divisor if size_divisor is not None else self.size_divisor
_a : Any = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("""size_divisor is required for resizing""" )
_a : List[str] = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError("""Invalid image(s)""" )
# All transformations expect numpy arrays.
_a : Tuple = [to_numpy_array(UpperCAmelCase__ ) for img in images]
if do_resize:
_a : Optional[int] = [self.resize(UpperCAmelCase__ , size_divisor=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_rescale:
_a : str = [self.rescale(UpperCAmelCase__ , scale=1 / 255 ) for image in images]
_a : Any = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
_a : Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
| 324
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class UpperCamelCase ( lowerCamelCase__ ):
UpperCamelCase : Tuple = None
UpperCamelCase : Any = None
UpperCamelCase : List[Any] = None
UpperCamelCase : List[str] = None
class UpperCamelCase ( lowerCamelCase__ ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any]=1 , UpperCAmelCase__ : Optional[Any]=0 , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : Any=512 , UpperCAmelCase__ : int="cls" , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Union[str, Any]=True , **UpperCAmelCase__ : Optional[int] , ) -> Any:
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
_a : Tuple = project_dim
_a : Optional[Any] = pooler_fn
_a : Union[str, Any] = learn_encoder
_a : Union[str, Any] = use_attention_mask
class UpperCamelCase ( lowerCamelCase__ ):
UpperCamelCase : List[str] = [R'''pooler''', R'''logit_scale''']
UpperCamelCase : List[str] = [R'''position_ids''', R'''predictions.decoder.bias''']
UpperCamelCase : Union[str, Any] = '''roberta'''
UpperCamelCase : Optional[Any] = RobertaSeriesConfig
def __init__( self : Dict , UpperCAmelCase__ : List[str] ) -> int:
super().__init__(UpperCAmelCase__ )
_a : Any = XLMRobertaModel(UpperCAmelCase__ )
_a : int = nn.Linear(config.hidden_size , config.project_dim )
_a : Union[str, Any] = getattr(UpperCAmelCase__ , """has_pre_transformation""" , UpperCAmelCase__ )
if self.has_pre_transformation:
_a : Any = nn.Linear(config.hidden_size , config.project_dim )
_a : int = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def _lowercase ( self : str , UpperCAmelCase__ : Optional[torch.Tensor] = None , UpperCAmelCase__ : Optional[torch.Tensor] = None , UpperCAmelCase__ : Optional[torch.Tensor] = None , UpperCAmelCase__ : Optional[torch.Tensor] = None , UpperCAmelCase__ : Optional[torch.Tensor] = None , UpperCAmelCase__ : Optional[torch.Tensor] = None , UpperCAmelCase__ : Optional[torch.Tensor] = None , UpperCAmelCase__ : Optional[torch.Tensor] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , ) -> Union[str, Any]:
_a : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
_a : int = self.base_model(
input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , inputs_embeds=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_attentions=UpperCAmelCase__ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=UpperCAmelCase__ , )
if self.has_pre_transformation:
_a : List[Any] = outputs["hidden_states"][-2]
_a : List[str] = self.pre_LN(UpperCAmelCase__ )
_a : List[str] = self.transformation_pre(UpperCAmelCase__ )
return TransformationModelOutput(
projection_state=UpperCAmelCase__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
_a : int = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=UpperCAmelCase__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 354
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCamelCase ( unittest.TestCase ):
@property
def _lowercase ( self : Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
_a : List[str] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def _lowercase ( self : Dict ) -> Dict:
_a : str = self.dummy_uncond_unet
_a : Optional[int] = KarrasVeScheduler()
_a : List[str] = KarrasVePipeline(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : int = torch.manual_seed(0 )
_a : List[Any] = pipe(num_inference_steps=2 , generator=UpperCAmelCase__ , output_type="""numpy""" ).images
_a : Tuple = torch.manual_seed(0 )
_a : int = pipe(num_inference_steps=2 , generator=UpperCAmelCase__ , output_type="""numpy""" , return_dict=UpperCAmelCase__ )[0]
_a : int = image[0, -3:, -3:, -1]
_a : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a : str = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : Tuple ) -> List[str]:
_a : Optional[Any] = """google/ncsnpp-celebahq-256"""
_a : Any = UNetaDModel.from_pretrained(UpperCAmelCase__ )
_a : Dict = KarrasVeScheduler()
_a : int = KarrasVePipeline(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : Optional[int] = torch.manual_seed(0 )
_a : Tuple = pipe(num_inference_steps=20 , generator=UpperCAmelCase__ , output_type="""numpy""" ).images
_a : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_a : Optional[int] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 324
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
"configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
"feature_extraction_whisper": ["WhisperFeatureExtractor"],
"processing_whisper": ["WhisperProcessor"],
"tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"WhisperForConditionalGeneration",
"WhisperModel",
"WhisperPreTrainedModel",
"WhisperForAudioClassification",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWhisperForConditionalGeneration",
"TFWhisperModel",
"TFWhisperPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"FlaxWhisperForConditionalGeneration",
"FlaxWhisperModel",
"FlaxWhisperPreTrainedModel",
"FlaxWhisperForAudioClassification",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 355
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_snake_case = 16
_snake_case = 32
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ = 1_6 ):
'''simple docstring'''
_a : str = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_a : Dict = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(UpperCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
_a : Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_a : Tuple = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_a : List[Any] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(UpperCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_a : Union[str, Any] = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_a : int = 1_6
elif accelerator.mixed_precision != "no":
_a : int = 8
else:
_a : str = None
return tokenizer.pad(
UpperCamelCase__ , padding="""longest""" , max_length=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
_a : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
_a : List[str] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_snake_case = mocked_dataloaders # noqa: F811
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , UpperCamelCase__ ) == "1":
_a : str = 2
# Initialize accelerator
_a : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_a : Any = config["""lr"""]
_a : Union[str, Any] = int(config["""num_epochs"""] )
_a : str = int(config["""seed"""] )
_a : List[Any] = int(config["""batch_size"""] )
_a : Tuple = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
_a : Optional[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_a : Optional[Any] = batch_size // MAX_GPU_BATCH_SIZE
_a : str = MAX_GPU_BATCH_SIZE
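    # Worked example (assuming MAX_GPU_BATCH_SIZE is the 16 set at module level):
    # batch_size=64 gives gradient_accumulation_steps=4 with a per-step batch of
    # 16, preserving the effective batch size of 64.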
set_seed(UpperCamelCase__ )
_a , _a : Optional[int] = get_dataloaders(UpperCamelCase__ , UpperCamelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_a : int = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=UpperCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_a : List[str] = model.to(accelerator.device )
# Instantiate optimizer
_a : List[str] = AdamW(params=model.parameters() , lr=UpperCamelCase__ )
# Instantiate scheduler
_a : List[str] = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(UpperCamelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_a , _a , _a , _a , _a : Optional[Any] = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Now we train the model
for epoch in range(UpperCamelCase__ ):
model.train()
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_a : Optional[Any] = model(**UpperCamelCase__ )
_a : str = outputs.loss
_a : Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(UpperCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
_a : Union[str, Any] = 0
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_a : Dict = model(**UpperCamelCase__ )
_a : Optional[Any] = outputs.logits.argmax(dim=-1 )
_a , _a : int = accelerator.gather((predictions, batch["""labels"""]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(UpperCamelCase__ ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
_a : str = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_a : int = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=UpperCamelCase__ , references=UpperCamelCase__ , )
_a : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , UpperCamelCase__ )
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : Tuple = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=UpperCamelCase__ , default=UpperCamelCase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
_a : Optional[Any] = parser.parse_args()
_a : Tuple = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 324
| 0
|
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class UpperCamelCase :
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
torch.manual_seed(0 )
_a : str = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
_a : Union[str, Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
_a : Dict = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
_a : Union[str, Any] = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=lowercase_ , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
_a : Dict = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self : Tuple ) -> List[str]:
torch.manual_seed(0 )
_a : str = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
_a : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
_a : Optional[Any] = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
_a : Optional[int] = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=lowercase_ , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
_a : List[Any] = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , )
torch.manual_seed(0 )
_a : Dict = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def _lowercase ( self : Optional[int] ) -> List[Any]:
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        prompt = inputs["""prompt"""]
        generator = inputs["""generator"""]
        num_inference_steps = inputs["""num_inference_steps"""]
        output_type = inputs["""output_type"""]
        if "image" in inputs:
            image = inputs["""image"""]
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs["""mask_image"""]
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs["""original_image"""]
        else:
            original_image = None
        prompt_embeds , negative_prompt_embeds = pipe.encode_prompt(prompt )
        # inputs with prompt converted to embeddings
        inputs = {
            """prompt_embeds""": prompt_embeds,
            """negative_prompt_embeds""": negative_prompt_embeds,
            """generator""": generator,
            """num_inference_steps""": num_inference_steps,
            """output_type""": output_type,
        }
        if image is not None:
            inputs["""image"""] = image
        if mask_image is not None:
            inputs["""mask_image"""] = mask_image
        if original_image is not None:
            inputs["""original_image"""] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe , optional_component , None )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
            pipe_loaded.to(torch_device )
            pipe_loaded.set_progress_bar_config(disable=None )
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded , optional_component ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
        inputs = self.get_dummy_inputs(torch_device )
        generator = inputs["""generator"""]
        num_inference_steps = inputs["""num_inference_steps"""]
        output_type = inputs["""output_type"""]
        # inputs with prompt converted to embeddings
        inputs = {
            """prompt_embeds""": prompt_embeds,
            """negative_prompt_embeds""": negative_prompt_embeds,
            """generator""": generator,
            """num_inference_steps""": num_inference_steps,
            """output_type""": output_type,
        }
        if image is not None:
            inputs["""image"""] = image
        if mask_image is not None:
            inputs["""mask_image"""] = mask_image
        if original_image is not None:
            inputs["""original_image"""] = original_image
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff , 1E-4 )
    def _lowercase ( self : Dict ) -> str:
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
            pipe_loaded.to(torch_device )
            pipe_loaded.set_progress_bar_config(disable=None )
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        inputs = self.get_dummy_inputs(torch_device )
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff , 1E-4 )
| 356
|
"""simple docstring"""
import numpy as np
def sigmoid( vector ):
    '''Element-wise logistic sigmoid: 1 / (1 + exp(-x)).'''
    return 1 / (1 + np.exp(-vector ))
def gaussian_error_linear_unit( vector ):
    '''Sigmoid approximation of GELU: x * sigmoid(1.702 * x).'''
    return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
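# Minimal usage sketch (the input values are assumed for illustration, not part
# of the original module):
#     gaussian_error_linear_unit(np.array([-1.0, 0.0, 1.0]))
# returns roughly array([-0.1542, 0., 0.8458]); the 1.702 factor makes the
# sigmoid curve track the exact GELU x * Phi(x) closely for moderate inputs.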
| 324
| 0
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_snake_case = logging.get_logger(__name__)
@dataclass
class UpperCamelCase ( BenchmarkArguments ):
    deprecated_args = [
        'no_inference',
        'no_cuda',
        'no_tpu',
        'no_speed',
        'no_memory',
        'no_env_print',
        'no_multi_process',
    ]
    def __init__( self : Tuple , **kwargs : Any ) -> int:
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    f"""{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"""
                    f""" {positive_arg}={kwargs[positive_arg]}""" )
        self.tpu_name = kwargs.pop("""tpu_name""" , self.tpu_name )
        self.device_idx = kwargs.pop("""device_idx""" , self.device_idx )
        self.eager_mode = kwargs.pop("""eager_mode""" , self.eager_mode )
        self.use_xla = kwargs.pop("""use_xla""" , self.use_xla )
        super().__init__(**kwargs )
    tpu_name : str = field(
        default=None , metadata={'''help''': '''Name of TPU'''} , )
    device_idx : int = field(
        default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
    eager_mode : bool = field(default=False , metadata={'''help''': '''Benchmark models in eager mode.'''} )
    use_xla : bool = field(
        default=False , metadata={
            '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`.'''
        } , )
    @cached_property
    def _setup_tpu( self : Optional[int] ) -> Optional[Any]:
        requires_backends(self , ["""tf"""] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
    @cached_property
    def _setup_strategy( self : List[Any] ) -> Optional[Any]:
        requires_backends(self , ["""tf"""] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            strategy = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
                strategy = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" )
            else:
                tf.config.set_visible_devices([] , """GPU""" )  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" )
        return strategy
    @property
    def is_tpu( self : Union[str, Any] ) -> Tuple:
        requires_backends(self , ["""tf"""] )
        return self._setup_tpu is not None
    @property
    def strategy( self : List[Any] ) -> List[Any]:
        requires_backends(self , ["""tf"""] )
        return self._setup_strategy
    @property
    def gpu_list( self : Union[str, Any] ) -> Any:
        requires_backends(self , ["""tf"""] )
        return tf.config.list_physical_devices("""GPU""" )
    @property
    def n_gpu( self : Dict ) -> Any:
        requires_backends(self , ["""tf"""] )
        if self.cuda:
            return len(self.gpu_list )
        return 0
    @property
    def is_gpu( self : List[str] ) -> Tuple:
        return self.n_gpu > 0
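# Minimal usage sketch (the keyword values below are assumed, not from the source):
#     args = UpperCamelCase(models=["bert-base-uncased"], no_inference=True)
# the deprecation shim in __init__ rewrites the legacy `no_inference` flag into
# `inference=False` before handing the remaining kwargs to BenchmarkArguments.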
| 357
|
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir('fixtures/test_sentencepiece.model')
_snake_case = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
_snake_case = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( snake_case_ , unittest.TestCase ):
UpperCamelCase : str = CamembertTokenizer
UpperCamelCase : List[Any] = CamembertTokenizerFast
UpperCamelCase : Optional[int] = True
UpperCamelCase : Union[str, Any] = True
def _lowercase ( self : List[Any] ) -> Union[str, Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_a : List[Any] = CamembertTokenizer(UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self : List[str] ) -> Tuple:
_a : Optional[Any] = """<pad>"""
_a : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ) -> str:
_a : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(UpperCAmelCase__ ) , 1004 )
def _lowercase ( self : List[str] ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def _lowercase ( self : Union[str, Any] ) -> str:
_a : Tuple = CamembertTokenizer(UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
_a : List[Any] = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
_a : Any = """I was born in 92000, and this is falsé."""
_a : Union[str, Any] = tokenizer.encode(UpperCAmelCase__ )
_a : Dict = rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Tuple = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
_a : List[Any] = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
_a : List[str] = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
_a : int = rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def _lowercase ( self : Dict ) -> List[str]:
if not self.test_rust_tokenizer:
return
_a : Optional[int] = self.get_tokenizer()
_a : Tuple = self.get_rust_tokenizer()
_a : List[Any] = """I was born in 92000, and this is falsé."""
_a : List[str] = tokenizer.tokenize(UpperCAmelCase__ )
_a : Union[str, Any] = rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
_a : int = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
_a : Optional[int] = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
_a : int = self.get_rust_tokenizer()
_a : Optional[Any] = tokenizer.encode(UpperCAmelCase__ )
_a : Dict = rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def _lowercase ( self : Tuple ) -> List[Any]:
# fmt: off
_a : Dict = {"""input_ids""": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
_a : Union[str, Any] = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=UpperCAmelCase__ , )
| 324
| 0
|
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
_snake_case = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
def get_diffusers_versions( ):
    '''Return all diffusers releases published on PyPI, sorted by version.'''
    url = '''https://pypi.org/pypi/diffusers/json'''
    releases = json.loads(request.urlopen(url ).read() )['''releases'''].keys()
    return sorted(releases , key=lambda v : version.Version(v ) )
def init_hf_modules( ):
    '''Create the cache directory for dynamic modules and put it on sys.path.'''
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE )
    os.makedirs(HF_MODULES_CACHE , exist_ok=True )
    init_path = Path(HF_MODULES_CACHE ) / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module( name ):
    '''Create a submodule of the dynamic module cache, with __init__.py files all the way up.'''
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE ) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent )
    os.makedirs(dynamic_module_path , exist_ok=True )
    init_path = dynamic_module_path / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def get_relative_imports( module_file ):
    '''List the relative imports (`import .xxx` / `from .xxx import yyy`) of a module file.'''
    with open(module_file , """r""" , encoding="""utf-8""" ) as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"""^\s*import\s+\.(\S+)\s*$""" , content , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"""^\s*from\s+\.(\S+)\s+import""" , content , flags=re.MULTILINE )
    # Unique-ify
    return list(set(relative_imports ) )
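# e.g. a file containing "from .utils import helper" yields ["utils"]
# (the module name here is assumed for illustration).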
def get_relative_import_files( module_file ):
    '''Recursively collect the files of all relative imports a module file depends on.'''
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f ) )
        module_path = Path(module_file ).parent
        new_import_files = [str(module_path / m ) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"""{f}.py""" for f in new_import_files]
        no_change = len(new_import_files ) == 0
        all_relative_imports.extend(files_to_check )
    return all_relative_imports
def check_imports( filename ):
    '''Check that a module file's absolute imports are importable, then return its relative imports.'''
    with open(filename , """r""" , encoding="""utf-8""" ) as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r"""^\s*import\s+(\S+)\s*$""" , content , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"""^\s*from\s+(\S+)\s+import""" , content , flags=re.MULTILINE )
    # Only keep the top-level module
    imports = [imp.split(""".""" )[0] for imp in imports if not imp.startswith(""".""" )]
    # Unique-ify and test we got them all
    imports = list(set(imports ) )
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp )
        except ImportError:
            missing_packages.append(imp )
    if len(missing_packages ) > 0:
        raise ImportError(
            """This modeling file requires the following packages that were not found in your environment: """
            f"""{', '.join(missing_packages )}. Run `pip install {' '.join(missing_packages )}`""" )
    return get_relative_imports(filename )
def get_class_in_module( class_name , module_path ):
    '''Import `module_path` and return `class_name` from it (or the unique pipeline class if None).'''
    module_path = module_path.replace(os.path.sep , """.""" )
    module = importlib.import_module(module_path )
    if class_name is None:
        return find_pipeline_class(module )
    return getattr(module , class_name )
def find_pipeline_class( loaded_module ):
    '''Find the unique DiffusionPipeline subclass defined in `loaded_module`.'''
    from ..pipelines import DiffusionPipeline
    cls_members = dict(inspect.getmembers(loaded_module , inspect.isclass ) )
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls , DiffusionPipeline )
            and cls.__module__.split(""".""" )[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
                    f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
                    f""" {loaded_module}.""" )
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file( pretrained_model_name_or_path , module_file , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , ):
    '''Download a module file from a repo (or the community pipelines on GitHub) and cache it locally.'''
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    module_file_or_url = os.path.join(pretrained_model_name_or_path , module_file )
    if os.path.isfile(module_file_or_url ):
        resolved_module_file = module_file_or_url
        submodule = '''local'''
    elif pretrained_model_name_or_path.count("""/""" ) == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = '''v''' + '''.'''.join(__version__.split(""".""" )[:3] )
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else '''main'''
            logger.info(f"""Defaulting to latest_version: {revision}.""" )
        elif revision in available_versions:
            revision = f"""v{revision}"""
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
                f""" {', '.join(available_versions + ['main'] )}.""" )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision , pipeline=pretrained_model_name_or_path )
        try:
            resolved_module_file = cached_download(
                github_url , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , )
            submodule = '''git'''
            module_file = pretrained_model_name_or_path + '''.py'''
        except EnvironmentError:
            logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path , module_file , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , )
            submodule = os.path.join("""local""" , """--""".join(pretrained_model_name_or_path.split("""/""" ) ) )
        except EnvironmentError:
            logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file )
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule )
    submodule_path = Path(HF_MODULES_CACHE ) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file , submodule_path / module_file )
        for module_needed in modules_needed:
            module_needed = f"""{module_needed}.py"""
            shutil.copy(os.path.join(pretrained_model_name_or_path , module_needed ) , submodule_path / module_needed )
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token , str ):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path , revision=revision , token=token ).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule )
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file , submodule_path / module_file )
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path , f"""{module_needed}.py""" , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    return os.path.join(full_submodule , module_file )
def get_class_from_dynamic_module( pretrained_model_name_or_path , module_file , class_name = None , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ):
    '''Download a module file (see `get_cached_module_file`) and return `class_name` from it.'''
    final_module = get_cached_module_file(
        pretrained_model_name_or_path , module_file , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    return get_class_in_module(class_name , final_module.replace(""".py""" , """""" ) )
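# Minimal usage sketch (the community pipeline name is assumed for illustration):
#     cls = get_class_from_dynamic_module("lpw_stable_diffusion", "lpw_stable_diffusion.py")
# this fetches the file from the diffusers community folder on GitHub (or a repo),
# caches it under HF_MODULES_CACHE, and imports the pipeline class it defines.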
| 358
|
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_snake_case = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
_snake_case = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_snake_case = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_snake_case = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_snake_case = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_snake_case = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split( identifier ):
    '''Split a camel-cased identifier into its component words.'''
    matches = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , identifier )
    return [m.group(0 ) for m in matches]
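# e.g. camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"]; get_frameworks_table
# uses this to strip trailing words until a known model prefix remains.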
def get_frameworks_table( ):
    '''Build a table of all model types with flags for PT/TF/Flax support and their processor class.'''
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("""Config""" , """""" ): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = """""".join(camel_case_split(attr_name )[:-1] )
    all_models = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
    all_models = list(all_models )
    all_models.sort()
    data = {"""model_type""": all_models}
    data["""pytorch"""] = [pt_models[t] for t in all_models]
    data["""tensorflow"""] = [tf_models[t] for t in all_models]
    data["""flax"""] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = """AutoProcessor"""
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = """AutoTokenizer"""
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = """AutoFeatureExtractor"""
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = """AutoTokenizer"""
    data["""processor"""] = [processors[t] for t in all_models]
    return pd.DataFrame(data )
def update_pipeline_and_auto_class_table( table ):
    '''Update the (pipeline_tag, auto_class) table with any new models found in the auto mappings.'''
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
        auto_classes = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules , auto_classes , model_mappings ):
            # The type of pipeline may not exist in this framework
            if not hasattr(module , mapping ):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module , mapping ).values():
                if isinstance(name , str ):
                    model_names.append(name )
                else:
                    model_names.extend(list(name ) )
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
    return table
def update_metadata( token , commit_sha ):
    '''Regenerate the frameworks and pipeline-tags tables and push them to the metadata dataset.'''
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table )
    resolved_tags_file = hf_hub_download(
        """huggingface/transformers-metadata""" , """pipeline_tags.json""" , repo_type="""dataset""" , token=token )
    tags_dataset = Dataset.from_json(resolved_tags_file )
    table = {
        tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""])
        for i in range(len(tags_dataset ) )
    }
    table = update_pipeline_and_auto_class_table(table )
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys() )
    tags_table = pd.DataFrame(
        {
            """model_class""": model_classes,
            """pipeline_tag""": [table[m][0] for m in model_classes],
            """auto_class""": [table[m][1] for m in model_classes],
        } )
    tags_dataset = Dataset.from_pandas(tags_table )
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir , """frameworks.json""" ) )
        tags_dataset.to_json(os.path.join(tmp_dir , """pipeline_tags.json""" ) )
        if commit_sha is not None:
            commit_message = (
                f"""Update with commit {commit_sha}\n\nSee: """
                f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
            )
        else:
            commit_message = """Update"""
        upload_folder(
            repo_id="""huggingface/transformers-metadata""" , folder_path=tmp_dir , repo_type="""dataset""" , token=token , commit_message=commit_message , )
def check_pipeline_tags( ):
    '''Check all pipeline tasks are covered by the PIPELINE_TAGS_AND_AUTO_MODELS constant.'''
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["""pt"""]
            if isinstance(model , (list, tuple) ):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key )
    if len(missing ) > 0:
        msg = """, """.join(missing )
        raise ValueError(
            """The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """
            f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
_snake_case = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
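# Typical invocations (run from the repo root; token and sha values are assumed):
#     python utils/update_metadata.py --token <hf_token> --commit_sha <sha>
#     python utils/update_metadata.py --check-only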
| 324
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase ( snake_case_ , unittest.TestCase ):
UpperCamelCase : Optional[int] = UnCLIPImageVariationPipeline
UpperCamelCase : List[str] = IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''guidance_scale'''}
UpperCamelCase : List[Any] = IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase : Optional[int] = [
'''generator''',
'''return_dict''',
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
UpperCamelCase : Union[str, Any] = False
@property
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
return 32
@property
def _lowercase ( self : List[str] ) -> Dict:
return 32
@property
def _lowercase ( self : List[str] ) -> List[str]:
return self.time_input_dim
@property
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
return self.time_input_dim * 4
@property
def _lowercase ( self : List[Any] ) -> Any:
return 100
@property
def _lowercase ( self : List[str] ) -> Union[str, Any]:
_a : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
torch.manual_seed(0 )
_a : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__A )
@property
def _lowercase ( self : List[str] ) -> int:
torch.manual_seed(0 )
_a : Optional[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(__A )
@property
def _lowercase ( self : str ) -> List[str]:
torch.manual_seed(0 )
_a : Optional[Any] = {
"""clip_embeddings_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""cross_attention_dim""": self.cross_attention_dim,
}
_a : List[str] = UnCLIPTextProjModel(**__A )
return model
@property
def _lowercase ( self : Tuple ) -> str:
torch.manual_seed(0 )
_a : int = {
"""sample_size""": 32,
# RGB in channels
"""in_channels""": 3,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 6,
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": """identity""",
}
_a : Any = UNetaDConditionModel(**__A )
return model
@property
def _lowercase ( self : Tuple ) -> Any:
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _lowercase ( self : List[Any] ) -> Any:
torch.manual_seed(0 )
_a : int = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _lowercase ( self : List[Any] ) -> Dict:
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
_a : List[Any] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _lowercase ( self : List[str] ) -> str:
_a : List[Any] = self.dummy_decoder
_a : int = self.dummy_text_proj
_a : int = self.dummy_text_encoder
_a : Dict = self.dummy_tokenizer
_a : Optional[int] = self.dummy_super_res_first
_a : Any = self.dummy_super_res_last
_a : Tuple = UnCLIPScheduler(
variance_type="""learned_range""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , )
_a : Union[str, Any] = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , )
_a : Tuple = CLIPImageProcessor(crop_size=32 , size=32 )
_a : Optional[int] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
    def get_dummy_inputs( self : Dict , device : List[str] , seed : Any=0 , pil_image : Union[str, Any]=True ) -> Optional[Any]:
        input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0 , 1 )
            input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
def _lowercase ( self : List[str] ) -> Union[str, Any]:
_a : List[Any] = """cpu"""
_a : Optional[Any] = self.get_dummy_components()
_a : Union[str, Any] = self.pipeline_class(**__A )
_a : int = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_a : Any = self.get_dummy_inputs(__A , pil_image=__A )
_a : str = pipe(**__A )
_a : Union[str, Any] = output.images
_a : List[str] = self.get_dummy_inputs(__A , pil_image=__A )
_a : Union[str, Any] = pipe(
**__A , return_dict=__A , )[0]
_a : Tuple = image[0, -3:, -3:, -1]
_a : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a : Optional[Any] = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase ( self : Optional[int] ) -> Tuple:
_a : Optional[int] = """cpu"""
_a : str = self.get_dummy_components()
_a : Any = self.pipeline_class(**__A )
_a : Dict = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_a : str = self.get_dummy_inputs(__A , pil_image=__A )
_a : Union[str, Any] = pipe(**__A )
_a : str = output.images
_a : Tuple = self.get_dummy_inputs(__A , pil_image=__A )
_a : List[Any] = pipe(
**__A , return_dict=__A , )[0]
_a : str = image[0, -3:, -3:, -1]
_a : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a : int = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase ( self : Any ) -> Dict:
_a : str = """cpu"""
_a : Dict = self.get_dummy_components()
_a : List[Any] = self.pipeline_class(**__A )
_a : List[str] = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_a : int = self.get_dummy_inputs(__A , pil_image=__A )
_a : Optional[Any] = [
pipeline_inputs["""image"""],
pipeline_inputs["""image"""],
]
_a : Dict = pipe(**__A )
_a : Optional[int] = output.images
_a : Tuple = self.get_dummy_inputs(__A , pil_image=__A )
_a : List[str] = [
tuple_pipeline_inputs["""image"""],
tuple_pipeline_inputs["""image"""],
]
_a : List[Any] = pipe(
**__A , return_dict=__A , )[0]
_a : Dict = image[0, -3:, -3:, -1]
_a : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
_a : Any = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase ( self : List[str] ) -> Tuple:
_a : List[Any] = torch.device("""cpu""" )
        class DummyScheduler :
            init_noise_sigma = 1
_a : str = self.get_dummy_components()
_a : Dict = self.pipeline_class(**__A )
_a : str = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_a : Optional[int] = torch.Generator(device=__A ).manual_seed(0 )
_a : int = pipe.decoder.dtype
_a : Dict = 1
_a : Optional[int] = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
_a : int = pipe.prepare_latents(
__A , dtype=__A , device=__A , generator=__A , latents=__A , scheduler=DummyScheduler() )
_a : Any = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
_a : Tuple = pipe.prepare_latents(
__A , dtype=__A , device=__A , generator=__A , latents=__A , scheduler=DummyScheduler() )
_a : List[str] = self.get_dummy_inputs(__A , pil_image=__A )
_a : Union[str, Any] = pipe(
**__A , decoder_latents=__A , super_res_latents=__A ).images
_a : Union[str, Any] = self.get_dummy_inputs(__A , pil_image=__A )
# Don't pass image, instead pass embedding
_a : Any = pipeline_inputs.pop("""image""" )
_a : Any = pipe.image_encoder(__A ).image_embeds
_a : str = pipe(
**__A , decoder_latents=__A , super_res_latents=__A , image_embeddings=__A , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def _lowercase ( self : Dict ) -> int:
_a : Optional[Any] = torch_device == """cpu"""
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
_a : Any = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=__A , expected_max_diff=__A )
@skip_mps
def _lowercase ( self : Any ) -> str:
_a : str = torch_device == """cpu"""
_a : Union[str, Any] = True
_a : Any = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
self._test_inference_batch_single_identical(
test_max_difference=__A , relax_max_difference=__A , additional_params_copy_to_batched_inputs=__A , )
def _lowercase ( self : Dict ) -> Dict:
_a : int = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
_a : Dict = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=__A , additional_params_copy_to_batched_inputs=__A , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__A )
@skip_mps
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowercase ( self : Any ) -> Any:
return super().test_save_load_local()
@skip_mps
def _lowercase ( self : Tuple ) -> Union[str, Any]:
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : int ) -> List[str]:
_a : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png""" )
_a : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/unclip/karlo_v1_alpha_cat_variation_fp16.npy""" )
_a : List[Any] = UnCLIPImageVariationPipeline.from_pretrained(
"""kakaobrain/karlo-v1-alpha-image-variations""" , torch_dtype=torch.floataa )
_a : Tuple = pipeline.to(__A )
pipeline.set_progress_bar_config(disable=__A )
_a : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
_a : Optional[Any] = pipeline(
__A , generator=__A , output_type="""np""" , )
_a : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(__A , __A , 15 )
| 359
|
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def test_from_dir( files , tmp_path_factory ):
    '''simple docstring'''
_a : Dict = tmp_path_factory.mktemp("""dset_infos_dir""" )
if "full:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""---\ndataset_info:\n dataset_size: 42\n---""" )
if "empty:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""""" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f:
f.write("""{\"default\": {\"dataset_size\": 42}}""" )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 4_2
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , ),
] , )
def test_dataset_info_dump_and_reload( dataset_info , tmp_path ):
    '''simple docstring'''
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path , """dataset_info.json""" ) )
def test_dataset_info_to_yaml_dict( ):
    '''simple docstring'''
    dataset_info = DatasetInfo(
        description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 4_2}] , download_checksums={} , download_size=1_3_3_7 , post_processing_size=4_4_2 , dataset_size=1_2_3_4 , size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty( ):
    '''simple docstring'''
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=4_2 ),
"""v2""": DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload( dataset_infos_dict , tmp_path ):
    '''simple docstring'''
    tmp_path = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path )
    reloaded = DatasetInfosDict.from_directory(tmp_path )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path , """README.md""" ) )
| 324
| 0
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize( example ):
    '''Tokenize one example and record its characters-per-token ratio.'''
    output = {}
    output["""input_ids"""] = tokenizer(example["""content"""] , truncation=False )["""input_ids"""]
    output["""ratio_char_token"""] = len(example["""content"""] ) / len(output["""input_ids"""] )
    return output
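# `ratio_char_token` is a rough compression measure: how many source characters
# each token covers on average for this tokenizer.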
_snake_case = HfArgumentParser(PretokenizationArguments)
_snake_case = parser.parse_args()
if args.num_workers is None:
_snake_case = multiprocessing.cpu_count()
_snake_case = AutoTokenizer.from_pretrained(args.tokenizer_dir)
_snake_case = time.time()
_snake_case = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
_snake_case = time.time()
_snake_case = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
_snake_case = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 360
|
"""simple docstring"""
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class UpperCamelCase ( unittest.TestCase , snake_case_ ):
    def setUp( self : int ) -> int:
        self.tool = load_tool("""text-to-speech""" )
        self.tool.setup()
    def test_exact_match_arg( self : List[str] ) -> Union[str, Any]:
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        result = self.tool("""hey""" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
    def test_exact_match_kwarg( self : Optional[int] ) -> Optional[Any]:
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        result = self.tool("""hey""" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
| 324
| 0
|
"""simple docstring"""
from random import randint, random
def construct_highway( number_of_cells , frequency , initial_speed , random_frequency = False , random_speed = False , max_speed = 5 , ):
    '''Build a single-lane highway of `number_of_cells` cells and place cars on it.'''
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed , 0 )
    while i < number_of_cells:
        highway[0][i] = (
            randint(0 , max_speed ) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1 , max_speed * 2 ) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance( highway_now , car_index ):
    '''Number of empty cells in front of the car at `car_index` (wrapping around the loop).'''
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells ) ):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now , -1 )
def update( highway_now , probability , max_speed ):
    '''Compute the next speed of every car following the Nagel-Schreckenberg rules.'''
    number_of_cells = len(highway_now )
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells ):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1 , max_speed )
            # Number of empty cell before the next car
            dn = get_distance(highway_now , car_index ) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index] , dn )
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1 , 0 )
    return next_highway
def simulate( highway , number_of_update , probability , max_speed ):
    '''Run `number_of_update` steps of the simulation, appending each new state to `highway`.'''
    number_of_cells = len(highway[0] )
    for i in range(number_of_update ):
        next_speeds_calculated = update(highway[i] , probability , max_speed )
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells ):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds )
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
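# Minimal usage sketch (parameter values are assumed for illustration):
#     highway = construct_highway(number_of_cells=20, frequency=4, initial_speed=1)
#     states = simulate(highway, number_of_update=5, probability=0.1, max_speed=5)
# each entry of `states` is one time step; a cell holds a car's speed, or -1 if empty.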
| 361
|
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class UpperCamelCase ( snake_case_ ):
    def __init__( self : Union[str, Any] , parent : Dict , config_class : int=None , has_text_modality : Optional[Any]=True , common_properties : List[str]=None , **kwargs : str ) -> int:
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
def _lowercase ( self : int ) -> Tuple:
_a : List[str] = self.config_class(**self.inputs_dict )
_a : Dict = (
["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["""vocab_size"""] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) , msg=f"""`{prop}` does not exist""" )
# Test that config has the common properties as setter
for idx, name in enumerate(UpperCAmelCase__ ):
try:
setattr(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
self.parent.assertEqual(
getattr(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ , msg=f"""`{name} value {idx} expected, but was {getattr(UpperCAmelCase__ , UpperCAmelCase__ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(UpperCAmelCase__ ):
try:
_a : Optional[int] = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ , msg=f"""`{name} value {idx} expected, but was {getattr(UpperCAmelCase__ , UpperCAmelCase__ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
_a : Optional[Any] = self.config_class(**self.inputs_dict )
_a : List[str] = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , UpperCAmelCase__ )
def _lowercase ( self : int ) -> List[str]:
_a : Optional[Any] = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Tuple = os.path.join(UpperCAmelCase__ , """config.json""" )
config_first.to_json_file(UpperCAmelCase__ )
_a : List[str] = self.config_class.from_json_file(UpperCAmelCase__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _lowercase ( self : Union[str, Any] ) -> Dict:
_a : Dict = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(UpperCAmelCase__ )
_a : Dict = self.config_class.from_pretrained(UpperCAmelCase__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _lowercase ( self : Dict ) -> Tuple:
_a : List[Any] = self.config_class(**self.inputs_dict )
_a : Any = """test"""
with tempfile.TemporaryDirectory() as tmpdirname:
_a : List[Any] = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
config_first.save_pretrained(UpperCAmelCase__ )
_a : List[Any] = self.config_class.from_pretrained(UpperCAmelCase__ , subfolder=UpperCAmelCase__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _lowercase ( self : List[str] ) -> Union[str, Any]:
_a : Tuple = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
_a : Union[str, Any] = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def _lowercase ( self : Tuple ) -> List[str]:
if self.config_class.is_composition:
return
_a : str = self.config_class()
self.parent.assertIsNotNone(UpperCAmelCase__ )
def _lowercase ( self : List[Any] ) -> Optional[Any]:
_a : Dict = copy.deepcopy(UpperCAmelCase__ )
_a : Any = self.config_class(**UpperCAmelCase__ )
_a : str = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) )
elif getattr(UpperCAmelCase__ , UpperCAmelCase__ ) != value:
wrong_values.append((key, getattr(UpperCAmelCase__ , UpperCAmelCase__ ), value) )
if len(UpperCAmelCase__ ) > 0:
_a : List[Any] = """\n""".join([f"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
raise ValueError(f"""The following keys were not properly set in the config:\n{errors}""" )
def _lowercase ( self : int ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 324
| 0
|
"""simple docstring"""
def circle_sort(collection: list) -> list:
    """Sort a list in place with circle sort and return it.

    >>> circle_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """One circular pass over collection[low:high + 1]; True if anything was swapped."""
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(circle_sort(unsorted))
| 362
|
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'diffusion_pytorch_model.bin'
FLAX_WEIGHTS_NAME = 'diffusion_flax_model.msgpack'
ONNX_WEIGHTS_NAME = 'model.onnx'
SAFETENSORS_WEIGHTS_NAME = 'diffusion_pytorch_model.safetensors'
ONNX_EXTERNAL_WEIGHTS_NAME = 'weights.pb'
HUGGINGFACE_CO_RESOLVE_ENDPOINT = 'https://huggingface.co'
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = 'diffusers_modules'
HF_MODULES_CACHE = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
DEPRECATED_REVISION_ARGS = ['fp16', 'non-ema']
TEXT_ENCODER_ATTN_MODULE = '.self_attn'
| 324
| 0
|
"""simple docstring"""
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser('Accelerate CLI tool', usage='accelerate <command> [<args>]', allow_abbrev=False)
    subparsers = parser.add_subparsers(help='accelerate command helpers')

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 363
|
"""simple docstring"""
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of `successes` successes in `trials` independent
    Bernoulli trials, each succeeding with probability `prob`."""
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials')
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers')
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError('the function is defined for non-negative integers')
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 1 - 0')
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('Probability of 2 successes out of 4 trials')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
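    # Sanity check of the printed example: C(4, 2) * 0.75**2 * 0.25**2
    # = 6 * 0.5625 * 0.0625 = 0.2109375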
| 324
| 0
|
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
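# A minimal illustration of the equivalence (the variable name is hypothetical):
#     maybe_int: Optional[int] = None   # pre-3.10 spelling
#     maybe_int: int | None = None      # PEP 604 spelling, Python 3.10+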
def list_field(default=None, metadata=None):
    # dataclasses require mutable defaults to go through a default_factory
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
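# With HfArgumentParser, the list fields above become `nargs="+"` CLI options; e.g.
# an illustrative invocation `--foo_int 1 2 --foo_str a b` parses to
# ListExample(foo_int=[1, 2], foo_str=["a", "b"], ...) with the remaining defaults kept.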
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Small helper to check pseudo-equality of two argument parsers."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]
            self.assertEqual(xx, yy)
def _lowercase ( self : Optional[Any] ) -> List[str]:
_a : Any = HfArgumentParser(a__ )
_a : List[str] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=a__ , required=a__ )
expected.add_argument("""--bar""" , type=a__ , required=a__ )
expected.add_argument("""--baz""" , type=a__ , required=a__ )
expected.add_argument("""--flag""" , type=a__ , default=a__ , const=a__ , nargs="""?""" )
self.argparsersEqual(a__ , a__ )
        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
def _lowercase ( self : Optional[Any] ) -> str:
_a : Any = HfArgumentParser(a__ )
_a : Tuple = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=a__ )
expected.add_argument("""--baz""" , default="""toto""" , type=a__ , help="""help message""" )
self.argparsersEqual(a__ , a__ )
def _lowercase ( self : Optional[Any] ) -> int:
_a : str = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=a__ , default=a__ , const=a__ , nargs="""?""" )
expected.add_argument("""--baz""" , type=a__ , default=a__ , const=a__ , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=a__ , dest="""baz""" )
expected.add_argument("""--opt""" , type=a__ , default=a__ )
_a : Tuple = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(a__ )
for dataclass_type in dataclass_types:
_a : Union[str, Any] = HfArgumentParser(a__ )
self.argparsersEqual(a__ , a__ )
_a : List[Any] = parser.parse_args([] )
self.assertEqual(a__ , Namespace(foo=a__ , baz=a__ , opt=a__ ) )
_a : int = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(a__ , Namespace(foo=a__ , baz=a__ , opt=a__ ) )
_a : Union[str, Any] = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(a__ , Namespace(foo=a__ , baz=a__ , opt=a__ ) )
_a : Optional[int] = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(a__ , Namespace(foo=a__ , baz=a__ , opt=a__ ) )
_a : Optional[Any] = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(a__ , Namespace(foo=a__ , baz=a__ , opt=a__ ) )
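    # A minimal sketch of the `--no_*` convention asserted above (argument names are
    # illustrative): a truthy `--baz` option is paired with
    #     parser.add_argument("--no_baz", action="store_false", dest="baz")
    # so both flags write to the same `baz` destination.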
def _lowercase ( self : Optional[int] ) -> List[str]:
_a : Optional[int] = HfArgumentParser(a__ )
_a : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(a__ , a__ )
_a : Union[str, Any] = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
_a : Optional[Any] = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
_a : Dict = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
_a : int = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
_a : Union[str, Any] = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
_a : Union[str, Any] = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def _lowercase ( self : Optional[int] ) -> Any:
@dataclass
class UpperCamelCase :
UpperCamelCase : Literal["titi", "toto", 42] = "toto"
_a : List[Any] = HfArgumentParser(a__ )
_a : Optional[Any] = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(a__ , a__ )
_a : List[str] = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
_a : List[str] = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
_a : Dict = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def _lowercase ( self : Dict ) -> List[Any]:
_a : Union[str, Any] = HfArgumentParser(a__ )
_a : List[str] = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=a__ )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=a__ )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=a__ )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=a__ )
self.argparsersEqual(a__ , a__ )
_a : Optional[int] = parser.parse_args([] )
self.assertEqual(
a__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
_a : Union[str, Any] = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(a__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def _lowercase ( self : Optional[Any] ) -> int:
_a : Any = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=a__ , type=a__ )
expected.add_argument("""--bar""" , default=a__ , type=a__ , help="""help message""" )
expected.add_argument("""--baz""" , default=a__ , type=a__ )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=a__ )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=a__ )
_a : Any = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(a__ )
for dataclass_type in dataclass_types:
_a : Union[str, Any] = HfArgumentParser(a__ )
self.argparsersEqual(a__ , a__ )
_a : Tuple = parser.parse_args([] )
self.assertEqual(a__ , Namespace(foo=a__ , bar=a__ , baz=a__ , ces=[] , des=[] ) )
_a : Tuple = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(a__ , Namespace(foo=12 , bar=3.1_4 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def _lowercase ( self : str ) -> Tuple:
_a : Union[str, Any] = HfArgumentParser(a__ )
_a : List[str] = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=a__ , required=a__ )
expected.add_argument("""--required_str""" , type=a__ , required=a__ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=a__ , )
self.argparsersEqual(a__ , a__ )
def _lowercase ( self : List[str] ) -> int:
_a : List[Any] = HfArgumentParser(a__ )
_a : Tuple = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=a__ , required=a__ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=a__ , )
expected.add_argument("""--opt""" , type=a__ , default=a__ )
expected.add_argument("""--baz""" , default="""toto""" , type=a__ , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=a__ )
self.argparsersEqual(a__ , a__ )
def _lowercase ( self : List[Any] ) -> List[Any]:
_a : Any = HfArgumentParser(a__ )
_a : Optional[int] = {
"""foo""": 12,
"""bar""": 3.1_4,
"""baz""": """42""",
"""flag""": True,
}
_a : Tuple = parser.parse_dict(a__ )[0]
_a : List[str] = BasicExample(**a__ )
self.assertEqual(a__ , a__ )
def _lowercase ( self : Optional[Any] ) -> Any:
_a : int = HfArgumentParser(a__ )
_a : Dict = {
"""foo""": 12,
"""bar""": 3.1_4,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(a__ , parser.parse_dict , a__ , allow_extra_keys=a__ )
def _lowercase ( self : str ) -> List[str]:
_a : Optional[int] = HfArgumentParser(a__ )
_a : int = {
"""foo""": 12,
"""bar""": 3.1_4,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_a : List[str] = os.path.join(a__ , """temp_json""" )
os.mkdir(a__ )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(a__ , a__ )
_a : Dict = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
_a : int = BasicExample(**a__ )
self.assertEqual(a__ , a__ )
def _lowercase ( self : str ) -> Union[str, Any]:
_a : Tuple = HfArgumentParser(a__ )
_a : List[str] = {
"""foo""": 12,
"""bar""": 3.1_4,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_a : List[Any] = os.path.join(a__ , """temp_yaml""" )
os.mkdir(a__ )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(a__ , a__ )
_a : Union[str, Any] = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
_a : str = BasicExample(**a__ )
self.assertEqual(a__ , a__ )
def _lowercase ( self : Optional[int] ) -> Union[str, Any]:
_a : Tuple = HfArgumentParser(a__ )
self.assertIsNotNone(a__ )
| 364
|
"""simple docstring"""
def depth_first_search(grid, row, col, visit):
    """Count unique paths from (row, col) to the bottom-right cell of `grid`,
    moving in the four cardinal directions and never crossing a 1-cell or a
    visited cell.

    >>> depth_first_search([[0, 0], [0, 0]], 0, 0, set())
    2
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 324
| 0
|
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
    load_pytorch_checkpoint_in_tf2_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
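# Each MODEL_CLASSES entry maps a shortcut name to a tuple of
# (config class, TF model class(es), PyTorch model class(es), pretrained config/weights map(s));
# e.g. the "bert" entry pairs TFBertForPreTraining with BertForPreTraining.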
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")


def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 365
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 324
| 0
|
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter and initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('FirePing32')
| 366
|
"""simple docstring"""
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )
            # each frontier hunts the other's current node
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    bfs_path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print('Unidirectional BFS computation time : ', bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
print('Bidirectional BFS computation time : ', bd_bfs_time)
| 324
| 0
|
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_0000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
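# `get_duration` (imported from the local `utils` module) is assumed to be a decorator
# that runs the wrapped function and returns its wall-clock duration, so each
# decorated call below evaluates to a timing rather than the function's result.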
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'text': datasets.Value('string'), 'numbers': datasets.Value('float32')})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, 'dataset.arrow'), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained('bert-base-cased', use_fast=True)

        def tokenize(examples):
            return tokenizer(examples['text'])

        times['map identity'] = map(dataset)
        times['map identity batched'] = map(dataset, batched=True)
        times['map no-op batched'] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type='numpy'):
            times['map no-op batched numpy'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='pandas'):
            times['map no-op batched pandas'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='torch', columns='numbers'):
            times['map no-op batched pytorch'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='tensorflow', columns='numbers'):
            times['map no-op batched tensorflow'] = map(dataset, function=lambda x: None, batched=True)

        times['map fast-tokenizer batched'] = map(dataset, function=tokenize, batched=True)
        times['filter'] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, 'wb') as f:
        f.write(json.dumps(times).encode('utf-8'))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 367
|
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'The model checkpoint for weights initialization. Leave None if you want to train a model from'
                ' scratch.'
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={'help': 'The input training data file (a text file).'}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'The input training data files (multiple files in glob format). '
                'Very often splitting large files to smaller files can prevent tokenizer going out of memory'
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'},
    )
    line_by_line: bool = field(
        default=False,
        metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'},
    )
    mlm: bool = field(
        default=False, metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'}
    )
    whole_word_mask: bool = field(default=False, metadata={'help': 'Whether or not to use whole word mask.'})
    mlm_probability: float = field(
        default=0.15, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            'help': (
                'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
                ' modeling.'
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'}
    )
    block_size: int = field(
        default=-1,
        metadata={
            'help': (
                'Optional input sequence length after tokenization. '
                'The training dataset will be truncated in block of this size for training. '
                'Default to the model max input length for single sentence inputs (take into account special tokens).'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set whole word masking and mlm to True for Chinese Whole Word Mask')
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it, and load it from here, using --tokenizer_name')

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool('.ckpt' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info('Training new model from scratch')
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == 'xlnet':
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )
# Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output['eval_loss'])
        result = {'perplexity': perplexity}

        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_lm.txt')
        if trainer.is_world_master():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key in sorted(result.keys()):
                    logger.info('  %s = %s', key, str(result[key]))
                    writer.write('%s = %s\n' % (key, str(result[key])))

        results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 324
| 0
|
"""simple docstring"""
def hamming_distance(string1: str, string2: str) -> int:
    """Return the number of positions at which two equal-length strings differ.

    >>> hamming_distance("python", "pythen")
    1
    """
    if len(string1) != len(string2):
        raise ValueError('String lengths must match!')
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
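    # A classic worked example: "karolin" and "kathrin" differ at three positions,
    # so hamming_distance("karolin", "kathrin") == 3.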
| 368
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
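# For example, a TF key like "encoder/LayerNorm/gamma" first becomes
# "encoder.LayerNorm.gamma" via the ["/", "."] pattern and then
# "encoder_layer_norm.weight" via [".LayerNorm.gamma", "_layer_norm.weight"].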
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
_snake_case = parser.parse_args()
if args.save_dir is None:
_snake_case = Path(args.tf_ckpt_path).parent.name
_snake_case = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
_snake_case = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
_snake_case = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
'summarization': AutoModelForSeq2SeqLM,
'translation': AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
_snake_case = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
_snake_case = sorted(arg_to_scheduler.keys())
_snake_case = '{' + ', '.join(arg_to_scheduler_choices) + '}'
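# Usage sketch (hedged): `--lr_scheduler linear` selects
# get_linear_schedule_with_warmup from the mapping above, which is then
# called with placeholder step counts like
#   scheduler = get_linear_schedule_with_warmup(
#       optimizer, num_warmup_steps=500, num_training_steps=10_000
#   )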
class UpperCamelCase ( pl.LightningModule ):
def __init__( self : List[str] , UpperCAmelCase__ : argparse.Namespace , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : str="base" , UpperCAmelCase__ : int=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : Optional[int] , ) -> Dict:
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(UpperCamelCase__ )
_a : List[Any] = 0
_a : Optional[int] = Path(self.hparams.output_dir )
_a : Union[str, Any] = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
_a : str = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"""num_labels""": num_labels} if num_labels is not None else {}) , cache_dir=UpperCamelCase__ , **UpperCamelCase__ , )
else:
_a : Optional[Any] = config
_a : Dict = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(self.hparams , UpperCamelCase__ , UpperCamelCase__ ):
assert hasattr(self.config , UpperCamelCase__ ), f"""model config doesn't have a `{p}` attribute"""
setattr(self.config , UpperCamelCase__ , getattr(self.hparams , UpperCamelCase__ ) )
if tokenizer is None:
_a : List[str] = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=UpperCamelCase__ , )
else:
_a : Dict = tokenizer
_a : List[str] = MODEL_MODES[mode]
if model is None:
_a : Optional[int] = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=UpperCamelCase__ , )
else:
_a : Optional[Any] = model
def _lowercase ( self : Optional[int] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Any ) -> Tuple:
_a : Any = self.model_type.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
def _lowercase ( self : Dict ) -> str:
_a : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler]
_a : List[str] = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
_a : int = {"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1}
return scheduler
def _lowercase ( self : Optional[int] ) -> List[Any]:
_a : Optional[Any] = self.model
_a : Dict = ["""bias""", """LayerNorm.weight"""]
_a : Dict = [
{
"""params""": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check these named parameters
"""weight_decay""": self.hparams.weight_decay,
},
{
"""params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
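        # Illustrative effect: for a model with parameters "fc.weight" and
        # "fc.bias", "fc.weight" joins the weight-decayed group while
        # "fc.bias" (matched by the no_decay list) gets weight_decay 0.0.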
if self.hparams.adafactor:
_a : Optional[Any] = Adafactor(
UpperCamelCase__ , lr=self.hparams.learning_rate , scale_parameter=UpperCamelCase__ , relative_step=UpperCamelCase__ )
else:
_a : Any = AdamW(
UpperCamelCase__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
_a : Any = optimizer
_a : str = self.get_lr_scheduler()
return [optimizer], [scheduler]
def _lowercase ( self : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any ) -> List[Any]:
return self.validation_step(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self : int , UpperCAmelCase__ : Optional[int] ) -> int:
return self.validation_end(UpperCamelCase__ )
def _lowercase ( self : int ) -> int:
_a : List[Any] = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
_a : int = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
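        # Worked example with illustrative numbers: 10_000 examples,
        # train_batch_size=32, accumulate_grad_batches=2, gpus=2, max_epochs=3
        # -> effective_batch_size = 32 * 2 * 2 = 128
        # -> total steps ~ (10_000 / 128) * 3 ~ 234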
def _lowercase ( self : Dict , UpperCAmelCase__ : Union[str, Any] ) -> List[str]:
if stage == "test":
_a : List[str] = len(self.test_dataloader().dataset )
else:
_a : Optional[Any] = self.get_dataloader("""train""" , self.hparams.train_batch_size , shuffle=UpperCamelCase__ )
_a : Dict = len(self.train_dataloader().dataset )
def _lowercase ( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : bool = False ) -> Optional[int]:
raise NotImplementedError("""You must implement this for your task""" )
def _lowercase ( self : Tuple ) -> List[str]:
return self.train_loader
def _lowercase ( self : Optional[Any] ) -> str:
return self.get_dataloader("""dev""" , self.hparams.eval_batch_size , shuffle=UpperCamelCase__ )
def _lowercase ( self : Any ) -> Optional[Any]:
return self.get_dataloader("""test""" , self.hparams.eval_batch_size , shuffle=UpperCamelCase__ )
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : List[Any] ) -> List[Any]:
return os.path.join(
self.hparams.data_dir , """cached_{}_{}_{}""".format(
UpperCamelCase__ , list(filter(UpperCamelCase__ , self.hparams.model_name_or_path.split("""/""" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def _lowercase ( self : int , UpperCAmelCase__ : Dict[str, Any] ) -> None:
_a : Optional[int] = self.output_dir.joinpath("""best_tfmr""" )
_a : str = self.step_count
self.model.save_pretrained(UpperCamelCase__ )
self.tokenizer.save_pretrained(UpperCamelCase__ )
@staticmethod
def _lowercase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any ) -> Optional[int]:
parser.add_argument(
"""--model_name_or_path""" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--config_name""" , default="""""" , type=UpperCamelCase__ , help="""Pretrained config name or path if not the same as model_name""" )
parser.add_argument(
"""--tokenizer_name""" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument(
"""--cache_dir""" , default=str(Path(UpperCamelCase__ ).parent / """test_run""" / """cache""" ) , type=UpperCamelCase__ , help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" , )
parser.add_argument(
"""--encoder_layerdrop""" , type=UpperCamelCase__ , help="""Encoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--decoder_layerdrop""" , type=UpperCamelCase__ , help="""Decoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--dropout""" , type=UpperCamelCase__ , help="""Dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--attention_dropout""" , type=UpperCamelCase__ , help="""Attention dropout probability (Optional). Goes into model.config""" , )
parser.add_argument("""--learning_rate""" , default=5E-5 , type=UpperCamelCase__ , help="""The initial learning rate for Adam.""" )
parser.add_argument(
"""--lr_scheduler""" , default="""linear""" , choices=UpperCamelCase__ , metavar=UpperCamelCase__ , type=UpperCamelCase__ , help="""Learning rate scheduler""" , )
parser.add_argument("""--weight_decay""" , default=0.0 , type=UpperCamelCase__ , help="""Weight decay if we apply some.""" )
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=UpperCamelCase__ , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--warmup_steps""" , default=0 , type=UpperCamelCase__ , help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--num_workers""" , default=4 , type=UpperCamelCase__ , help="""kwarg passed to DataLoader""" )
parser.add_argument("""--num_train_epochs""" , dest="""max_epochs""" , default=3 , type=UpperCamelCase__ )
parser.add_argument("""--train_batch_size""" , default=32 , type=UpperCamelCase__ )
parser.add_argument("""--eval_batch_size""" , default=32 , type=UpperCamelCase__ )
parser.add_argument("""--adafactor""" , action="""store_true""" )
class UpperCamelCase ( pl.Callback ):
def _lowercase ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> str:
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on the master worker with Ray. In new pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class UpperCamelCase ( pl.Callback ):
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] ) -> Union[str, Any]:
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(UpperCamelCase__ )
class UpperCamelCase ( pl.Callback ):
def _lowercase ( self : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] ) -> Tuple:
_a : List[str] = trainer.lr_schedulers[0]["""scheduler"""]
_a : Optional[int] = {f"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(UpperCamelCase__ )
def _lowercase ( self : List[str] , UpperCAmelCase__ : pl.Trainer , UpperCAmelCase__ : pl.LightningModule ) -> Optional[Any]:
rank_zero_info("""***** Validation results *****""" )
_a : Optional[int] = trainer.callback_metrics
# Log results
for key in sorted(UpperCamelCase__ ):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(UpperCamelCase__ , str(metrics[key] ) ) )
def _lowercase ( self : List[str] , UpperCAmelCase__ : pl.Trainer , UpperCAmelCase__ : pl.LightningModule ) -> Tuple:
rank_zero_info("""***** Test results *****""" )
_a : Any = trainer.callback_metrics
# Log and save results to file
_a : Optional[int] = os.path.join(pl_module.hparams.output_dir , """test_results.txt""" )
with open(UpperCamelCase__ , """w""" ) as writer:
for key in sorted(UpperCamelCase__ ):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(UpperCamelCase__ , str(metrics[key] ) ) )
writer.write("""{} = {}\n""".format(UpperCamelCase__ , str(metrics[key] ) ) )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
parser.add_argument(
"""--output_dir""" , default=str(Path(__UpperCamelCase ).parent / """test_run""" / """model_checkpoints""" ) , type=__UpperCamelCase , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=__UpperCamelCase , default="""O2""" , help=(
"""For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_tpu_cores""" , dest="""tpu_cores""" , type=__UpperCamelCase )
parser.add_argument("""--max_grad_norm""" , dest="""gradient_clip_val""" , default=1.0 , type=__UpperCamelCase , help="""Max gradient norm""" )
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
parser.add_argument("""--do_predict""" , action="""store_true""" , help="""Whether to run predictions on the test set.""" )
parser.add_argument(
"""--gradient_accumulation_steps""" , dest="""accumulate_grad_batches""" , type=__UpperCamelCase , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--seed""" , type=__UpperCamelCase , default=4_2 , help="""random seed for initialization""" )
parser.add_argument(
"""--data_dir""" , default=str(Path(__UpperCamelCase ).parent / """test_run""" / """dummy-train-data""" ) , type=__UpperCamelCase , help="""The input data dir. Should contain the training files for the CoNLL-2003 NER task.""" , )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=[] , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ):
'''simple docstring'''
pl.seed_everything(args.seed )
# init model
_a : Any = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=__UpperCamelCase )
# add custom checkpoints
if checkpoint_callback is None:
_a : Tuple = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix="""checkpoint""" , monitor="""val_loss""" , mode="""min""" , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(__UpperCamelCase )
if logging_callback is None:
_a : int = LoggingCallback()
_a : Tuple = {}
if args.fp16:
_a : Tuple = 1_6
if args.gpus > 1:
_a : str = """auto"""
_a : Optional[Any] = """ddp"""
_a : str = args.accumulate_grad_batches
_a : Optional[Any] = None
_a : Optional[Any] = """auto"""
_a : Any = pl.Trainer.from_argparse_args(
__UpperCamelCase , weights_summary=__UpperCamelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=__UpperCamelCase , val_check_interval=1 , num_sanity_val_steps=2 , **__UpperCamelCase , )
if args.do_train:
trainer.fit(__UpperCamelCase )
else:
print("""RAG modeling tests with new set functions successfuly executed!""" )
return trainer
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImg2ImgPipeline,
UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
UpperCamelCase : Union[str, Any] = StableDiffusionXLImg2ImgPipeline
UpperCamelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
UpperCamelCase : Tuple = PipelineTesterMixin.required_optional_params - {'''latents'''}
UpperCamelCase : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowercase ( self : Any ) -> List[Any]:
torch.manual_seed(0 )
_a : str = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase__ , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
_a : Union[str, Any] = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
_a : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_a : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=32 , )
_a : Tuple = CLIPTextModel(UpperCAmelCase__ )
_a : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=UpperCAmelCase__ )
_a : Dict = CLIPTextModelWithProjection(UpperCAmelCase__ )
_a : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=UpperCAmelCase__ )
_a : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=0 ) -> int:
_a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
_a : Any = image / 2 + 0.5
if str(UpperCAmelCase__ ).startswith("""mps""" ):
_a : Any = torch.manual_seed(UpperCAmelCase__ )
else:
_a : Tuple = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
_a : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.7_5,
}
return inputs
def _lowercase ( self : Any ) -> List[Any]:
_a : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_a : Dict = self.get_dummy_components()
_a : List[Any] = StableDiffusionXLImg2ImgPipeline(**UpperCAmelCase__ )
_a : Union[str, Any] = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : List[str] = self.get_dummy_inputs(UpperCAmelCase__ )
_a : List[str] = sd_pipe(**UpperCAmelCase__ ).images
_a : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a : List[str] = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase ( self : Any ) -> Any:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def _lowercase ( self : List[Any] ) -> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def _lowercase ( self : Any ) -> Any:
pass
def _lowercase ( self : Tuple ) -> Union[str, Any]:
_a : int = self.get_dummy_components()
_a : Any = StableDiffusionXLImg2ImgPipeline(**UpperCAmelCase__ )
_a : Dict = sd_pipe.to(UpperCAmelCase__ )
_a : List[str] = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
# forward without prompt embeds
_a : int = self.get_dummy_inputs(UpperCAmelCase__ )
_a : List[str] = 3 * ["""this is a negative prompt"""]
_a : Dict = negative_prompt
_a : Dict = 3 * [inputs["""prompt"""]]
_a : Optional[Any] = sd_pipe(**UpperCAmelCase__ )
_a : Tuple = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
_a : int = self.get_dummy_inputs(UpperCAmelCase__ )
_a : Union[str, Any] = 3 * ["""this is a negative prompt"""]
_a : int = 3 * [inputs.pop("""prompt""" )]
_a , _a , _a , _a : List[str] = sd_pipe.encode_prompt(UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ )
_a : Tuple = sd_pipe(
**UpperCAmelCase__ , prompt_embeds=UpperCAmelCase__ , negative_prompt_embeds=UpperCAmelCase__ , pooled_prompt_embeds=UpperCAmelCase__ , negative_pooled_prompt_embeds=UpperCAmelCase__ , )
_a : Dict = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : List[str] ) -> Union[str, Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : str="cpu" , UpperCAmelCase__ : str=torch.floataa , UpperCAmelCase__ : List[Any]=0 ) -> List[str]:
_a : List[str] = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
_a : Union[str, Any] = np.random.RandomState(UpperCAmelCase__ ).standard_normal((1, 4, 64, 64) )
_a : List[Any] = torch.from_numpy(UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
_a : Any = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _lowercase ( self : int ) -> Union[str, Any]:
_a : Union[str, Any] = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : List[str] = self.get_inputs(UpperCAmelCase__ )
_a : Tuple = pipe(**UpperCAmelCase__ ).images
_a : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_a : int = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ):
'''simple docstring'''
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, F"""{torch_layer} layer.weight does not match"""
_a : Any = nn.Parameter(snake_case_ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F"""{torch_layer} layer.bias does not match"""
_a : int = nn.Parameter(snake_case_ )
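# Minimal sketch of the shape-guarded assignment above (names hypothetical):
#   layer = nn.Linear(4, 4)
#   w = torch.eye(4)
#   assert layer.weight.shape == w.shape
#   layer.weight = nn.Parameter(w)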
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
# set torch weights for 1-to-1 comparison
_a : Tuple = np.asarray(weights[0] )
_a : List[Any] = np.asarray(weights[1] )
_a : Dict = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(snake_case_ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(snake_case_ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(snake_case_ ).view(-1 , snake_case_ ).contiguous().transpose(0 , 1 ) , )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
# set torch weights for 1-to-1 comparison
_a : int = np.asarray(weights[0] )
_a : Tuple = np.asarray(weights[1] )
_a : int = np.asarray(weights[2] )
_a : Optional[Any] = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(snake_case_ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case_ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(snake_case_ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(snake_case_ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(snake_case_ ).view(-1 , snake_case_ ).contiguous().transpose(0 , 1 ) , )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
# layernorm 1
_a : Optional[int] = weights[0][0][0]
_a : List[Any] = np.asarray(layer_norm_a[0] )
_a : Optional[Any] = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(snake_case_ ) , torch.tensor(snake_case_ ) , )
# lsh weights + output
_a : Union[str, Any] = weights[0][1]
if len(snake_case_ ) < 4:
set_layer_weights_in_torch_lsh(snake_case_ , torch_block.attention , snake_case_ )
else:
set_layer_weights_in_torch_local(snake_case_ , torch_block.attention , snake_case_ )
# intermediate weights
_a : Tuple = weights[2][0][1][2]
# Chunked Feed Forward
if len(snake_case_ ) == 4:
_a : Any = intermediate_weights[2]
# layernorm 2
_a : str = np.asarray(intermediate_weights[0][0] )
_a : Tuple = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(snake_case_ ) , torch.tensor(snake_case_ ) , )
# intermediate dense
_a : List[Any] = np.asarray(intermediate_weights[1][0] )
_a : int = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(snake_case_ ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case_ ) , )
# intermediate out
_a : Union[str, Any] = np.asarray(intermediate_weights[4][0] )
_a : Optional[Any] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(snake_case_ ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case_ ) , )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
# reformer model
_a : int = torch_model.reformer
# word embeds
_a : str = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(snake_case_ ) , )
if isinstance(weights[3] , snake_case_ ):
_a : List[Any] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_a : Optional[Any] = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F"""{position_embeddings[emb_idx]} emb does not match"""
_a : List[str] = nn.Parameter(torch.tensor(snake_case_ ) )
_a : int = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
snake_case_ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_a : Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(snake_case_ , snake_case_ , snake_case_ )
# output layer norm
_a : Dict = np.asarray(weights[7][0] )
_a : str = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(snake_case_ ) , torch.tensor(snake_case_ ) , )
# output embeddings
_a : Optional[int] = np.asarray(weights[9][0] )
_a : Union[str, Any] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(snake_case_ ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case_ ) , )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
# Initialise PyTorch model
_a : Tuple = ReformerConfig.from_json_file(snake_case_ )
print(F"""Building PyTorch model from configuration: {config}""" )
_a : int = ReformerModelWithLMHead(snake_case_ )
with open(snake_case_ , """rb""" ) as f:
_a : Any = pickle.load(snake_case_ )["""weights"""]
set_model_weights_in_torch(snake_case_ , snake_case_ , config.hidden_size )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the Trax model pickle file.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_snake_case = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 370
|
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger()
@dataclass
class UpperCamelCase :
UpperCamelCase : nn.Module
UpperCamelCase : List[nn.Module] = field(default_factory=snake_case_ )
UpperCamelCase : list = field(default_factory=snake_case_ )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tensor , UpperCAmelCase__ : Tensor ) -> Any:
_a : int = len(list(m.modules() ) ) == 1 or isinstance(UpperCAmelCase__ , nn.Conv2d ) or isinstance(UpperCAmelCase__ , nn.BatchNorm2d )
if has_not_submodules:
self.traced.append(UpperCAmelCase__ )
def __call__( self : Tuple , UpperCAmelCase__ : Tensor ) -> Tuple:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(UpperCAmelCase__ )
[x.remove() for x in self.handles]
return self
@property
def _lowercase ( self : Optional[int] ) -> int:
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda UpperCAmelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
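# Usage sketch (hedged; assumes the dataclass keeps the name Tracker used in
# the original conversion script):
#   net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
#   traced = Tracker(net)(torch.randn(1, 3, 32, 32)).parametrized
#   # traced holds only the leaf modules that own parameters
#   # (the Conv2d and the BatchNorm2d; the ReLU is filtered out)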
@dataclass
class UpperCamelCase :
UpperCamelCase : nn.Module
UpperCamelCase : nn.Module
UpperCamelCase : int = 0
UpperCamelCase : List = field(default_factory=snake_case_ )
UpperCamelCase : List = field(default_factory=snake_case_ )
def __call__( self : Optional[Any] , UpperCAmelCase__ : Tensor ) -> Tuple:
_a : Union[str, Any] = Tracker(self.dest )(UpperCAmelCase__ ).parametrized
_a : List[Any] = Tracker(self.src )(UpperCAmelCase__ ).parametrized
_a : Tuple = list(filter(lambda UpperCAmelCase__ : type(UpperCAmelCase__ ) not in self.src_skip , UpperCAmelCase__ ) )
_a : Union[str, Any] = list(filter(lambda UpperCAmelCase__ : type(UpperCAmelCase__ ) not in self.dest_skip , UpperCAmelCase__ ) )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise Exception(
f"""Numbers of operations are different. Source module has {len(UpperCAmelCase__ )} operations while"""
f""" destination module has {len(UpperCAmelCase__ )}.""" )
for dest_m, src_m in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"""Transfered from={src_m} to={dest_m}""" )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = True ):
'''simple docstring'''
print(F"""Converting {name}...""" )
with torch.no_grad():
_a : List[str] = timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ ).eval()
_a : str = ResNetForImageClassification(UpperCamelCase__ ).eval()
_a : List[str] = ModuleTransfer(src=UpperCamelCase__ , dest=UpperCamelCase__ )
_a : List[str] = torch.randn((1, 3, 2_2_4, 2_2_4) )
module_transfer(UpperCamelCase__ )
assert torch.allclose(from_model(UpperCamelCase__ ) , our_model(UpperCamelCase__ ).logits ), "The model logits don't match the original one."
_a : Dict = F"""resnet{'-'.join(name.split('resnet' ) )}"""
print(UpperCamelCase__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add model""" , use_temp_dir=UpperCamelCase__ , )
# we can use the convnext one
_a : Optional[Any] = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add image processor""" , use_temp_dir=UpperCamelCase__ , )
print(F"""Pushed {checkpoint_name}""" )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True ):
'''simple docstring'''
_a : Any = """imagenet-1k-id2label.json"""
_a : Optional[int] = 1_0_0_0
_a : Any = (1, num_labels)
_a : Union[str, Any] = """huggingface/label-files"""
_a : Tuple = num_labels
_a : Optional[int] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
_a : Optional[Any] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_a : Any = idalabel
_a : Tuple = {v: k for k, v in idalabel.items()}
_a : List[str] = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ )
_a : Union[str, Any] = {
"""resnet18""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type="""basic""" ),
"""resnet26""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
"""resnet34""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type="""basic""" ),
"""resnet50""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
"""resnet101""": ImageNetPreTrainedConfig(
depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
"""resnet152""": ImageNetPreTrainedConfig(
depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
}
if model_name:
convert_weight_and_push(UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return config, expected_shape
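# For orientation: the (depths, hidden_sizes, layer_type) triples above mirror
# the classic torchvision definitions, e.g. resnet50 stacks 3+4+6+3 bottleneck
# blocks with stage widths 256/512/1024/2048.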
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architectures,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
_snake_case = parser.parse_args()
_snake_case = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
_a : Any = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
_a : Optional[Any] = set()
return any(
node not in visited and depth_first_search(a__ , a__ , a__ , a__ )
for node in graph )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
visited.add(a__ )
rec_stk.add(a__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(a__ , a__ , a__ , a__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(a__ )
return False
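# Worked example (hedged: assumes the functions are named check_cycle and
# depth_first_search, as the recursive call above suggests):
#   graph = {0: [1], 1: [2], 2: [0]}   # edge 2 -> 0 closes a cycle
#   check_cycle(graph)                  # -> True
#   dag = {0: [1, 2], 1: [2], 2: []}
#   check_cycle(dag)                    # -> False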
if __name__ == "__main__":
from doctest import testmod
testmod()
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class UpperCamelCase ( unittest.TestCase ):
UpperCamelCase : Union[str, Any] = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCamelCase : List[Any] = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _lowercase ( self : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] ) -> str:
_a : List[str] = Text2TextGenerationPipeline(model=_A , tokenizer=_A )
return generator, ["Something to write", "Something else"]
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any ) -> Optional[int]:
_a : Dict = generator("""Something there""" )
self.assertEqual(_A , [{"""generated_text""": ANY(_A )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
_a : Dict = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_A )
self.assertEqual(
_A , [
[{"""generated_text""": ANY(_A )}, {"""generated_text""": ANY(_A )}],
[{"""generated_text""": ANY(_A )}, {"""generated_text""": ANY(_A )}],
] , )
_a : List[Any] = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_A )
self.assertEqual(
_A , [
[{"""generated_text""": ANY(_A )}, {"""generated_text""": ANY(_A )}],
[{"""generated_text""": ANY(_A )}, {"""generated_text""": ANY(_A )}],
] , )
with self.assertRaises(_A ):
generator(4 )
@require_torch
def _lowercase ( self : Optional[int] ) -> Union[str, Any]:
_a : Union[str, Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
_a : str = generator("""Something there""" , do_sample=_A )
self.assertEqual(_A , [{"""generated_text""": """"""}] )
_a : Tuple = 3
_a : int = generator(
"""Something there""" , num_return_sequences=_A , num_beams=_A , )
_a : Optional[Any] = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_A , _A )
_a : Dict = generator("""This is a test""" , do_sample=_A , num_return_sequences=2 , return_tensors=_A )
self.assertEqual(
_A , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
_a : List[Any] = generator.model.config.eos_token_id
_a : Optional[Any] = '<pad>'
_a : Dict = generator(
["""This is a test""", """This is a second test"""] , do_sample=_A , num_return_sequences=2 , batch_size=2 , return_tensors=_A , )
self.assertEqual(
_A , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
def _lowercase ( self : Union[str, Any] ) -> int:
_a : Union[str, Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
_a : Tuple = generator("""Something there""" , do_sample=_A )
self.assertEqual(_A , [{"""generated_text""": """"""}] )
"""simple docstring"""
_snake_case = 8.31_44_62 # Unit - J mol-1 K-1
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
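# Worked example: 1 mol of an ideal gas at 300 K in 0.0224 m^3 exerts
# P = nRT / V = 1 * 300 * 8.314462 / 0.0224 ~ 1.11e5 Pa (about 1.1 atm).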
if __name__ == "__main__":
from doctest import testmod
testmod()
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=DummyObject ):
UpperCamelCase : Union[str, Any] = ['''note_seq''']
def __init__( self : Tuple , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Tuple ) -> Any:
requires_backends(self , ["""note_seq"""] )
@classmethod
def _lowercase ( cls : Optional[Any] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Dict ) -> List[Any]:
requires_backends(cls , ["""note_seq"""] )
@classmethod
def _lowercase ( cls : Union[str, Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[Any] ) -> Optional[int]:
requires_backends(cls , ["""note_seq"""] )
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_snake_case = logging.getLogger(__name__)
_snake_case = 'pytorch_model.bin'
@dataclasses.dataclass
class UpperCamelCase :
UpperCamelCase : str = dataclasses.field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co.'''} , )
@dataclasses.dataclass
class UpperCamelCase :
UpperCamelCase : str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the training data.'''} )
UpperCamelCase : str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the data to predict on.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''The name of the task to train on.'''} , )
UpperCamelCase : Optional[List[str]] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''The list of labels for the task.'''} )
@dataclasses.dataclass
class UpperCamelCase :
UpperCamelCase : str = dataclasses.field(
metadata={'''help''': '''The output directory where the model predictions and checkpoints will be written.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default='''accuracy''' , metadata={'''help''': '''The evaluation metric used for the task.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default='''no''' , metadata={
'''help''': '''The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'''
} , )
UpperCamelCase : Optional[int] = dataclasses.field(
default=10 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , )
UpperCamelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={
'''help''': '''How much the specified evaluation metric must improve to satisfy early stopping conditions.'''
} , )
UpperCamelCase : Optional[bool] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the confidence score.'''} , )
UpperCamelCase : Optional[bool] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the validation performance.'''} , )
UpperCamelCase : Optional[bool] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Whether to fine-tune on labeled data after pseudo training.'''} , )
UpperCamelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={'''help''': '''Confidence threshold for pseudo-labeled data filtering.'''} , )
UpperCamelCase : Optional[int] = dataclasses.field(
default=100 , metadata={'''help''': '''Maximum number of self-training iterations.'''} , )
UpperCamelCase : Optional[int] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Random seed for initialization.'''} , )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Optional[int] = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
_a : Union[str, Any] = dataset.filter(lambda UpperCamelCase__ : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
_a : Any = int(eval_result * len(UpperCamelCase__ ) )
print(UpperCamelCase__ )
_a : str = dataset.sort("""probability""" , reverse=UpperCamelCase__ )
_a : Any = dataset.select(range(UpperCamelCase__ ) )
_a : Tuple = dataset.remove_columns(["""label""", """probability"""] )
_a : Optional[Any] = dataset.rename_column("""prediction""" , """label""" )
_a : Dict = dataset.map(lambda UpperCamelCase__ : {"label": idalabel[example["label"]]} )
_a : Union[str, Any] = dataset.shuffle(seed=args.seed )
_a : Optional[int] = os.path.join(UpperCamelCase__ , F"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(UpperCamelCase__ , index=UpperCamelCase__ )
else:
dataset.to_json(UpperCamelCase__ )
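# Illustrative effect of the confidence filter above (values hypothetical):
# with args.confidence_threshold = 0.9, a pseudo-labeled row like
# {"prediction": "positive", "probability": 0.97} is kept, while
# {"prediction": "negative", "probability": 0.55} is dropped before the
# surviving rows are renamed to carry `label` and written back to disk.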
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
_a : Optional[int] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
_a : Dict = STModelArguments(model_name_or_path=UpperCamelCase__ )
_a : Union[str, Any] = STDataArguments(train_file=UpperCamelCase__ , infer_file=UpperCamelCase__ )
_a : Any = STTrainingArguments(output_dir=UpperCamelCase__ )
_a : Any = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(UpperCamelCase__ ).items():
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for key, value in kwargs.items():
if hasattr(UpperCamelCase__ , UpperCamelCase__ ):
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Sanity checks
_a : Union[str, Any] = {}
_a : Tuple = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
_a : int = args.train_file
_a : List[Any] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
_a : Union[str, Any] = args.eval_file
for key in data_files:
_a : Optional[Any] = data_files[key].split(""".""" )[-1]
assert extension in ["csv", "json"], F"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
_a : str = extension
else:
assert extension == args.data_file_extension, F"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), F"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("""Creating the initial data directory for self-training...""" )
_a : Tuple = F"""{args.output_dir}/self-train_iter-{{}}""".format
_a : Dict = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=UpperCamelCase__ )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
accelerator.wait_for_everyone()
_a : str = None
_a : int = None
_a : str = 0
_a : List[Any] = False
# Show the progress bar
_a : List[Any] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_a : Union[str, Any] = data_dir_format(UpperCamelCase__ )
assert os.path.exists(UpperCamelCase__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_a : str = os.path.join(UpperCamelCase__ , """stage-1""" )
_a : Tuple = {
"""accelerator""": accelerator,
"""model_name_or_path""": args.model_name_or_path,
"""cache_dir""": args.cache_dir,
"""do_train""": True,
"""train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
"""do_eval""": True if args.eval_file is not None else False,
"""eval_file""": data_files["""eval"""],
"""do_predict""": True,
"""infer_file""": data_files["""infer"""],
"""task_name""": args.task_name,
"""label_list""": args.label_list,
"""output_dir""": current_output_dir,
"""eval_metric""": args.eval_metric,
"""evaluation_strategy""": args.evaluation_strategy,
"""early_stopping_patience""": args.early_stopping_patience,
"""early_stopping_threshold""": args.early_stopping_threshold,
"""seed""": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
arguments_dict.update({key: value} )
_a : int = os.path.join(UpperCamelCase__ , """best-checkpoint""" , UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , UpperCamelCase__ , UpperCamelCase__ , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , UpperCamelCase__ )
finetune(**UpperCamelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(UpperCamelCase__ )
logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , UpperCamelCase__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_a : Dict = os.path.join(UpperCamelCase__ , """best-checkpoint""" )
_a : List[str] = os.path.join(UpperCamelCase__ , """stage-2""" )
# Update arguments_dict
_a : int = model_path
_a : Dict = data_files["""train"""]
_a : int = current_output_dir
_a : Any = os.path.join(UpperCamelCase__ , """best-checkpoint""" , UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""" , UpperCamelCase__ , UpperCamelCase__ , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , UpperCamelCase__ )
finetune(**UpperCamelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(UpperCamelCase__ )
logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , UpperCamelCase__ )
_a : List[Any] = iteration
_a : int = data_dir_format(iteration + 1 )
_a : Dict = AutoConfig.from_pretrained(os.path.join(UpperCamelCase__ , """best-checkpoint""" ) )
_a : Union[str, Any] = config.idalabel
_a : Any = os.path.join(UpperCamelCase__ , """eval_results_best-checkpoint.json""" )
_a : Any = os.path.join(UpperCamelCase__ , """test_results_best-checkpoint.json""" )
assert os.path.exists(UpperCamelCase__ )
with open(UpperCamelCase__ , """r""" ) as f:
_a : Tuple = float(json.load(UpperCamelCase__ )[args.eval_metric] )
_a : Dict = os.path.join(UpperCamelCase__ , """infer_output_best-checkpoint.csv""" )
assert os.path.exists(UpperCamelCase__ )
# Loading the dataset from local csv or json files.
_a : List[Any] = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""]
_a : Any = load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""]
if accelerator.is_main_process:
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
shutil.copy(UpperCamelCase__ , os.path.join(UpperCamelCase__ , F"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(UpperCamelCase__ ):
shutil.copy(UpperCamelCase__ , os.path.join(UpperCamelCase__ , F"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
accelerator.wait_for_everyone()
_a : List[str] = os.path.join(UpperCamelCase__ , F"""train_pseudo.{args.data_file_extension}""" )
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result
            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1
                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True
        progress_bar.update(1 )
        if should_training_stop:
            break
if best_iteration is not None:
# Save the best iteration
logger.info("""Best iteration: %d""" , UpperCamelCase__ )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , UpperCamelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(UpperCamelCase__ , F"""eval_results_iter-{iteration}.json""" ) , os.path.join(UpperCamelCase__ , """eval_results_best-iteration.json""" ) , )
else:
# Assume that the last iteration is the best
logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , UpperCamelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(UpperCamelCase__ , F"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(UpperCamelCase__ , """eval_results_best-iteration.json""" ) , )
| 324
| 0
|
"""simple docstring"""
from math import pi, sqrt
def gamma( num ):
    '''simple docstring'''
    if num <= 0:
        raise ValueError("""math domain error""" )
    if num > 171.5:
        raise OverflowError("""math range error""" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""" )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def test_gamma( ):
    '''simple docstring'''
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
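# A minimal worked example of the half-integer recursion above (illustrative,
# not part of the original module): gamma(2.5) unwinds to 1.5 * 0.5 * sqrt(pi).
def demo_half_integer_gamma():
    expected = 1.5 * 0.5 * sqrt(pi )
    assert abs(gamma(2.5 ) - expected ) < 1e-12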
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input('Gamma of: '))
        print(F'''gamma({num}) = {gamma(num)}''')
        print('\nEnter 0 to exit...')
| 352
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    },
    'tokenizer_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 512,
}
SPIECE_UNDERLINE = '▁'
class UpperCamelCase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = CamembertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"] , **kwargs , ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
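    # Resulting layout (illustrative sketch of the method above):
    #   single sequence:   <s> A </s>
    #   pair of sequences: <s> A </s></s> B </s>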
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 324
| 0
|
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def lowerCAmelCase__ ( UpperCamelCase__ = "isbn/0140328726" ):
'''simple docstring'''
_a : Union[str, Any] = olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""" ) != 1:
_a : Optional[Any] = F"""{olid} is not a valid Open Library olid"""
raise ValueError(UpperCamelCase__ )
return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
def summarize_book( ol_book_data ):
    '''simple docstring'''
    desired_keys = {
        """title""": """Title""",
        """publish_date""": """Publish date""",
        """authors""": """Authors""",
        """number_of_pages""": """Number of pages:""",
        """first_sentence""": """First sentence""",
        """isbn_10""": """ISBN (10)""",
        """isbn_13""": """ISBN (13)""",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["""Authors"""] = [
        get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""]
    ]
    data["""First sentence"""] = data["""First sentence"""]["""value"""]
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = """, """.join(value )
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
    while True:
        isbn = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
            continue
        print(F'''\nSearching Open Library for ISBN: {isbn}...\n''')
        try:
            book_summary = summarize_book(get_openlibrary_data(F'''isbn/{isbn}'''))
            print('\n'.join(F'''{key}: {value}''' for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(F'''Sorry, there are no results for ISBN: {isbn}.''')
| 353
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case = logging.get_logger(__name__)
class UpperCamelCase ( BaseImageProcessor ):
    model_input_names = ['''pixel_values''']
    def __init__( self , do_resize: bool = True , size_divisor: int = 32 , resample=PILImageResampling.BILINEAR , do_rescale: bool = True , **kwargs , ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs )
    def resize( self , image: np.ndarray , size_divisor: int , resample , data_format: Optional[ChannelDimension] = None , **kwargs ) -> np.ndarray:
        height , width = get_image_size(image )
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image , (new_h, new_w) , resample=resample , data_format=data_format , **kwargs )
        return image
    def rescale( self , image: np.ndarray , scale: float , data_format: Optional[ChannelDimension] = None , **kwargs ) -> np.ndarray:
        return rescale(image=image , scale=scale , data_format=data_format , **kwargs )
    def preprocess( self , images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , do_resize: Optional[bool] = None , size_divisor: Optional[int] = None , resample=None , do_rescale: Optional[bool] = None , return_tensors: Optional[Union[TensorType, str]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError("""size_divisor is required for resizing""" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError("""Invalid image(s)""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img ) for img in images]
        if do_resize:
            images = [self.resize(image , size_divisor=size_divisor , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image , scale=1 / 255 ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
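# Minimal usage sketch (illustrative; "sample.png" is a hypothetical file):
# processor = UpperCamelCase(size_divisor=32)
# batch = processor.preprocess(PIL.Image.open("sample.png"), return_tensors="np")
# batch["pixel_values"][0]  # channels-first array, H and W now multiples of 32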
| 324
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class UpperCamelCase ( PretrainedConfig ):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self , vocab_size=40478 , n_positions=512 , n_embd=768 , n_layer=12 , n_head=12 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
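# Usage sketch (illustrative): the attribute_map above lets generic code read
# `config.hidden_size` while the value is stored as `config.n_embd`.
# config = UpperCamelCase(n_embd=768)
# assert config.hidden_size == 768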
| 354
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCamelCase ( unittest.TestCase ):
@property
def _lowercase ( self : Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
        model = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
    def _lowercase ( self : Dict ) -> Dict:
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=2 , generator=generator , output_type="""numpy""" ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pipe(num_inference_steps=2 , generator=generator , output_type="""numpy""" , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class UpperCamelCase ( unittest.TestCase ):
    def _lowercase ( self : Tuple ) -> List[str]:
        model_id = """google/ncsnpp-celebahq-256"""
        model = UNetaDModel.from_pretrained(model_id )
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=20 , generator=generator , output_type="""numpy""" ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 324
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/mbart-large-en-ro': (
            'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
        ),
        'facebook/mbart-large-cc25': (
            'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/mbart-large-en-ro': 1024,
    'facebook/mbart-large-cc25': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class UpperCamelCase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['''input_ids''', '''attention_mask''']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , tokenizer_file=None , src_lang=None , tgt_lang=None , sp_model_kwargs: Optional[Dict[str, Any]] = None , additional_special_tokens=None , **kwargs , ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , tokenizer_file=tokenizer_file , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model )
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["""<mask>"""] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        self._src_lang = src_lang if src_lang is not None else """en_XX"""
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
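    # Worked example of the offset logic above (illustrative): spm id 3 (",")
    # maps to fairseq id 3 + fairseq_offset == 4, matching the alignment table,
    # and the language codes are appended after the whole spm vocabulary.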
    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        state["""sp_model_proto"""] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    @property
    def vocab_size( self ) -> int:
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def src_lang( self ) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang: str ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors: str , src_lang: Optional[str] , tgt_lang: Optional[str] , **extra_kwargs ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["""forced_bos_token_id"""] = tgt_lang_id
        return inputs
    def get_vocab( self ) -> Dict:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text: str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ) -> str:
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def prepare_seq2seq_batch( self , src_texts: List[str] , src_lang: str = "en_XX" , tgt_texts: Optional[List[str]] = None , tgt_lang: str = "ro_RO" , **kwargs , ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode( self ):
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
    def set_tgt_lang_special_tokens( self , lang ) -> None:
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
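# Usage sketch for the language-code machinery above (illustrative;
# "sentencepiece.bpe.model" is a hypothetical local path):
# tok = UpperCamelCase("sentencepiece.bpe.model", src_lang="en_XX", tgt_lang="ro_RO")
# tok("Hello")["input_ids"] then ends with [eos_token_id, lang_code_to_id["en_XX"]].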
| 355
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator , batch_size = 16 ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
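# Worked example for the pad_to_multiple_of logic above (illustrative): with
# fp16, a 61-token batch pads up to 64 (the next multiple of 8); on TPU every
# batch is padded to a fixed 128 tokens instead.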
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_snake_case = mocked_dataloaders # noqa: F811
def training_function( config , args ):
    '''simple docstring'''
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , None ) == "1":
        config["""num_epochs"""] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    metric = evaluate.load("""glue""" , """mrpc""" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather((predictions, batch["""labels"""]) )
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader ) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"""epoch {epoch}:""" , eval_metric )
def main( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 324
| 0
|
"""simple docstring"""
from __future__ import annotations
from random import random
class Node :
    def __init__( self , value: int | None = None ) -> None:
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self : List[str] ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
    def __str__( self : Tuple ) -> str:
        value = str(self.value ) + """ """
        left = str(self.left or """""" )
        right = str(self.right or """""" )
        return value + left + right
def split( root , value ):
    '''simple docstring'''
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left , root.left = split(root.left , value )
            return left, root
        else:
            root.right , right = split(root.right , value )
            return root, right
def merge( left , right ):
    '''simple docstring'''
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right , right )
        return left
    else:
        right.left = merge(left , right.left )
        return right
def insert( root , value ):
    '''simple docstring'''
    node = Node(value )
    left , right = split(root , value )
    return merge(merge(left , node ) , right )
def erase( root , value ):
    '''simple docstring'''
    left , right = split(root , value - 1 )
    _ , right = split(right , value )
    return merge(left , right )
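# Worked example (illustrative): inserting 5 splits the treap into keys < 5 and
# keys >= 5, then merges left-part, the new node and right-part back while the
# random priorities keep the structure heap-ordered in expectation.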
def inorder( root ):
    '''simple docstring'''
    if not root:  # None
        return
    else:
        inorder(root.left )
        print(root.value , end=""",""" )
        inorder(root.right )
def interact_treap( root , args ):
    '''simple docstring'''
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root , int(arg[1:] ) )
        else:
            print("""Unknown command""" )
    return root
def main( ):
    '''simple docstring'''
    root = None
    print(
        """enter numbers to create a tree, + value to add value into treap, """
        """- value to erase all nodes with value. 'q' to quit. """ )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print("""goodbye!""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 356
|
"""simple docstring"""
import numpy as np
def sigmoid( vector ):
    '''simple docstring'''
    return 1 / (1 + np.exp(-vector ))
def sigmoid_linear_unit( vector ):
    '''simple docstring'''
    return vector * sigmoid(1.702 * vector )
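# Quick numeric check (illustrative): sigmoid(0) == 0.5, so the GELU
# approximation above returns exactly 0 at the origin.
def demo_silu_at_zero():
    assert sigmoid_linear_unit(np.array([0.0] ) )[0] == 0.0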
if __name__ == "__main__":
import doctest
doctest.testmod()
| 324
| 0
|
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq( number ):
    '''simple docstring'''
    sq = int(number**0.5 )
    return number == sq * sq
def add_three( x_num , x_den , y_num , y_den , z_num , z_den ):
    '''simple docstring'''
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
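# Worked example for add_three (illustrative): 1/2 + 1/3 + 1/6 gives
# top/bottom = 36/36 before reduction, i.e. (1, 1) after dividing by the gcd.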
def solution( order = 35 ):
    '''simple docstring'''
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0 )
    fraction_sum: tuple[int, int]
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
    for num, den in unique_s:
        total += Fraction(num , den )
    return total.denominator + total.numerator
if __name__ == "__main__":
print(F'''{solution() = }''')
| 357
|
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
SAMPLE_BPE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
FRAMEWORK = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def _lowercase ( self : List[Any] ) -> Union[str, Any]:
super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self : List[str] ) -> Tuple:
_a : Optional[Any] = """<pad>"""
_a : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ) -> str:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(vocab_keys ) , 1004 )
def _lowercase ( self : List[str] ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def _lowercase ( self : Union[str, Any] ) -> str:
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
        sequence = """I was born in 92000, and this is falsé."""
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
def _lowercase ( self : Dict ) -> List[str]:
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def _lowercase ( self : Tuple ) -> List[Any]:
# fmt: off
_a : Dict = {"""input_ids""": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
        sequences = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=sequences , )
| 324
| 0
|
"""simple docstring"""
import string
import numpy
def greatest_common_divisor( a , b ):
    '''simple docstring'''
    return b if a == 0 else greatest_common_divisor(b % a , a )
class HillCipher :
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x : x % 36 )
    to_int = numpy.vectorize(round )
    def __init__( self , encrypt_key: numpy.ndarray ) -> None:
        self.encrypt_key = self.modulus(encrypt_key )  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters( self , letter: str ) -> int:
        return self.key_string.index(letter )
    def replace_digits( self , num: int ) -> str:
        return self.key_string[round(num )]
    def check_determinant( self ) -> None:
        det = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            det = det % len(self.key_string )
        req_l = len(self.key_string )
        if greatest_common_divisor(det , len(self.key_string ) ) != 1:
            msg = (
                f"""determinant modular {req_l} of encryption key({det}) """
                f"""is not co prime w.r.t {req_l}.\nTry another key."""
            )
            raise ValueError(msg )
    def process_text( self , text: str ) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars ) % self.break_key != 0:
            chars.append(last )
        return "".join(chars )
    def encrypt( self , text: str ) -> str:
        text = self.process_text(text.upper() )
        encrypted = ""
        for i in range(0 , len(text ) - self.break_key + 1 , self.break_key ):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char ) for char in batch]
            batch_vec = numpy.array([vec] ).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec ) ).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num ) for num in batch_encrypted )
            encrypted += encrypted_batch
        return encrypted
    def make_decrypt_key( self ) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            det = det % len(self.key_string )
        det_inv = None
        for i in range(len(self.key_string ) ):
            if (det * i) % len(self.key_string ) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key )
            * numpy.linalg.inv(self.encrypt_key )
        )
        return self.to_int(self.modulus(inv_key ) )
    def decrypt( self , text: str ) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper() )
        decrypted = ""
        for i in range(0 , len(text ) - self.break_key + 1 , self.break_key ):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char ) for char in batch]
            batch_vec = numpy.array([vec] ).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec ) ).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num ) for num in batch_decrypted )
            decrypted += decrypted_batch
        return decrypted
def main( ):
    '''simple docstring'''
    n = int(input("""Enter the order of the encryption key: """ ) )
    hill_matrix = []
    print("""Enter each row of the encryption key with space separated integers""" )
    for _ in range(n ):
        row = [int(x ) for x in input().split()]
        hill_matrix.append(row )
    hc = HillCipher(numpy.array(hill_matrix ) )
    print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
    option = input("""\n1. Encrypt\n2. Decrypt\n""" )
    if option == "1":
        text_e = input("""What text would you like to encrypt?: """ )
        print("""Your encrypted text is:""" )
        print(hc.encrypt(text_e ) )
    elif option == "2":
        text_d = input("""What text would you like to decrypt?: """ )
        print("""Your decrypted text is:""" )
        print(hc.decrypt(text_d ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 358
|
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
('image-to-text', 'MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split( identifier ):
    '''simple docstring'''
    matches = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , identifier )
    return [m.group(0 ) for m in matches]
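# Example (illustrative): camel_case_split("TFBertModel") returns
# ["TF", "Bert", "Model"], which is how the framework prefix is peeled off below.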
def get_frameworks_table( ):
    '''simple docstring'''
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("""Config""" , """""" ): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = """""".join(camel_case_split(attr_name )[:-1] )
    all_models = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
    all_models = list(all_models )
    all_models.sort()
    data = {"""model_type""": all_models}
    data["""pytorch"""] = [pt_models[t] for t in all_models]
    data["""tensorflow"""] = [tf_models[t] for t in all_models]
    data["""flax"""] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = """AutoProcessor"""
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = """AutoTokenizer"""
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = """AutoFeatureExtractor"""
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = """AutoTokenizer"""
    data["""processor"""] = [processors[t] for t in all_models]
    return pd.DataFrame(data )
def update_pipeline_and_auto_class_table( table ):
    '''simple docstring'''
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, F"""TF_{model_mapping}""", F"""FLAX_{model_mapping}"""]
        auto_classes = [auto_class, F"""TF_{auto_class}""", F"""Flax_{auto_class}"""]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules , auto_classes , model_mappings ):
            # The type of pipeline may not exist in this framework
            if not hasattr(module , mapping ):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module , mapping ).values():
                if isinstance(name , str ):
                    model_names.append(name )
                else:
                    model_names.extend(list(name ) )
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
    return table
def update_metadata(token , commit_sha ):
    '''simple docstring'''
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table )
    resolved_tags_file = hf_hub_download(
        """huggingface/transformers-metadata""" , """pipeline_tags.json""" , repo_type="""dataset""" , token=token )
    tags_dataset = Dataset.from_json(resolved_tags_file )
    table = {
        tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""])
        for i in range(len(tags_dataset ) )
    }
    table = update_pipeline_and_auto_class_table(table )
    # Sort the model classes so nondeterministic ordering does not create spurious update commits.
    model_classes = sorted(table.keys() )
    tags_table = pd.DataFrame(
        {
            """model_class""": model_classes,
            """pipeline_tag""": [table[m][0] for m in model_classes],
            """auto_class""": [table[m][1] for m in model_classes],
        } )
    tags_dataset = Dataset.from_pandas(tags_table )
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir , """frameworks.json""" ) )
        tags_dataset.to_json(os.path.join(tmp_dir , """pipeline_tags.json""" ) )
        if commit_sha is not None:
            commit_message = (
                F"""Update with commit {commit_sha}\n\nSee: """
                F"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
            )
        else:
            commit_message = """Update"""
        upload_folder(
            repo_id="""huggingface/transformers-metadata""" , folder_path=tmp_dir , repo_type="""dataset""" , token=token , commit_message=commit_message , )
def check_pipeline_tags():
    '''simple docstring'''
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["""pt"""]
            if isinstance(model , (list, tuple) ):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key )
    if len(missing ) > 0:
        msg = """, """.join(missing )
        raise ValueError(
            """The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """
            F"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 324
| 0
|
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(flax_model , pytorch_checkpoint_path , is_sharded , allow_missing_keys=False ):
    '''simple docstring'''
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            """Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
            """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
            """ instructions.""" )
        raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path )
        logger.info(F"""Loading PyTorch weights from {pt_path}""" )
        pt_state_dict = torch.load(pt_path , map_location="""cpu""" )
        logger.info(F"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path , flax_model )
    return flax_state_dict
def rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix , ):
    '''simple docstring'''
    def is_key_or_prefix_key_in_dict(key ) -> bool:
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""scale""",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""mean""",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""var""",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""embedding""",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""kernel""",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""kernel""",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""weight""",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""bias""",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + """_g"""
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + """_v"""
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
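# A minimal, self-contained illustration of the conv-kernel relayout performed
# above: PyTorch stores conv weights as (out_ch, in_ch, kH, kW) while Flax
# expects (kH, kW, in_ch, out_ch), hence the transpose(2, 3, 1, 0). Shapes
# below are hypothetical.
_demo_pt_kernel = np.zeros((8, 3, 5, 5))
assert _demo_pt_kernel.transpose(2, 3, 1, 0).shape == (5, 5, 3, 8)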
def convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model ):
    '''simple docstring'''
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    model_prefix = flax_model.base_model_prefix
    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["""params"""]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params )
    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["""batch_stats"""] )
        random_flax_state_dict.update(flax_batch_stats )
    flax_state_dict = {}
    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
    )
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split(""".""" ) )
        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix )
        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("""batch_stats""",) + flax_key] = jnp.asarray(flax_tensor )
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key , None )
                continue
            # also add unexpected weight so that warning is thrown
            flax_state_dict[("""params""",) + flax_key] = jnp.asarray(flax_tensor )
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
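# A small round-trip sketch of the flatten/unflatten utilities the converters
# above rely on: nested parameter dicts become {tuple_key: leaf} and back.
_demo_params = {"encoder": {"layer_0": {"kernel": 1}}}
assert flatten_dict(_demo_params) == {("encoder", "layer_0", "kernel"): 1}
assert unflatten_dict(flatten_dict(_demo_params)) == _demo_params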
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames , flax_model ):
    '''simple docstring'''
    import torch
    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file )
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
        model_prefix = flax_model.base_model_prefix
        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["""params"""]
            random_flax_state_dict = flatten_dict(flax_model_params )
            random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params )
        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split(""".""" ) )
            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]
            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key
            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                        F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("""batch_stats""",) + flax_key] = jnp.asarray(flax_tensor )
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("""batch_stats""",) + flax_key] = jnp.asarray(flax_tensor )
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key , None )
                    continue
                # also add unexpected weight so that warning is thrown
                flax_state_dict[("""params""",) + flax_key] = jnp.asarray(flax_tensor )
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
def load_flax_checkpoint_in_pytorch_model(model , flax_checkpoint_path ):
    '''simple docstring'''
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path )
    logger.info(F"""Loading Flax weights from {flax_checkpoint_path}""" )
    # import correct flax class
    flax_cls = getattr(transformers , """Flax""" + model.__class__.__name__ )
    # load flax weight dict
    with open(flax_checkpoint_path , """rb""" ) as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls , state_f.read() )
        except UnpicklingError:
            raise EnvironmentError(F"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
    return load_flax_weights_in_pytorch_model(model , flax_state_dict )
def load_flax_weights_in_pytorch_model(pt_model , flax_state ):
    '''simple docstring'''
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            """Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"""
            """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
            """ instructions.""" )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bf16 ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
            """before loading those in PyTorch model.""" )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
    flax_state_dict = flatten_dict(flax_state )
    pt_model_dict = pt_model.state_dict()
    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
    )
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple
        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and """.""".join(flax_key_tuple ) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple[-1] == "kernel" and """.""".join(flax_key_tuple ) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("""running_mean""",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("""running_var""",)
        if "batch_stats" in flax_state:
            flax_key = """.""".join(flax_key_tuple[1:] )  # Remove the params/batch_stats header
        else:
            flax_key = """.""".join(flax_key_tuple )
        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(""".""" )
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + """_g"""
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + """_v"""
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = """.""".join(key_components )
                special_pt_names[key_to_check] = key
        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
                    F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
    # re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
        logger.warning(
            """Some weights of the Flax model were not used when initializing the PyTorch model"""
            F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
            F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
            """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
            F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
            """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
            """ FlaxBertForSequenceClassification model).""" )
    else:
        logger.warning(F"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
    if len(missing_keys ) > 0:
        logger.warning(
            F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
            F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
            """ use it for predictions and inference.""" )
    else:
        logger.warning(
            F"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
            """If your task is similar to the task the model of the checkpoint was trained on, """
            F"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
    return pt_model
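# A tiny sketch of the bfloat16 workaround above: torch.from_numpy cannot
# ingest bfloat16 arrays, so such weights are upcast to float32 first.
_demo_bf16 = jnp.ones((2, 2), dtype=jnp.bfloat16)
_demo_f32 = np.asarray(_demo_bf16.astype(jnp.float32))  # now safe for torch.from_numpy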
| 359
|
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def test_from_dir(files , tmp_path_factory ):
    '''simple docstring'''
    dataset_infos_dir = tmp_path_factory.mktemp("""dset_infos_dir""" )
    if "full:README.md" in files:
        with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
            f.write("""---\ndataset_info:\n dataset_size: 42\n---""" )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
            f.write("""""" )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f:
            f.write("""{\"default\": {\"dataset_size\": 42}}""" )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 4_2
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path , dataset_info ):
    '''simple docstring'''
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path , """dataset_info.json""" ) )
def test_dataset_info_to_yaml_dict():
    '''simple docstring'''
    dataset_info = DatasetInfo(
        description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 4_2}] , download_checksums={} , download_size=1_3_3_7 , post_processing_size=4_4_2 , dataset_size=1_2_3_4 , size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    '''simple docstring'''
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=4_2 ),
"""v2""": DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path , dataset_infos_dict ):
    '''simple docstring'''
    tmp_path = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path )
    reloaded = DatasetInfosDict.from_directory(tmp_path )
    # the config_name of the dataset_infos_dict takes precedence over the DatasetInfo attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path , """README.md""" ) )
| 324
| 0
|
"""simple docstring"""
def logical_left_shift(number , shift_amount ):
    '''simple docstring'''
    if number < 0 or shift_amount < 0:
        raise ValueError("""both inputs must be positive integers""" )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift(number , shift_amount ):
    '''simple docstring'''
    if number < 0 or shift_amount < 0:
        raise ValueError("""both inputs must be positive integers""" )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift(number , shift_amount ):
    '''simple docstring'''
    if number >= 0:  # Get binary representation of positive number
        binary_number = """0""" + str(bin(number ) ).strip("""-""" )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            """1""" + """0""" * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
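# Hand-worked examples for the three shifts above:
# logical_left_shift(1, 2)        -> '0b100'
# logical_right_shift(8, 2)       -> '0b10'
# arithmetic_right_shift(-8, 2)   -> '0b11110'  (5-bit two's complement of -2)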
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360
|
"""simple docstring"""
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester( unittest.TestCase , ToolTesterMixin ):
    def setUp( self ) -> int:
        self.tool = load_tool("""text-to-speech""" )
        self.tool.setup()
def _lowercase ( self : List[str] ) -> Union[str, Any]:
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
        result = self.tool("""hey""" )
        resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
        result = self.tool("""hey""" )
        resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
| 324
| 0
|
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
_snake_case = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def init_clap(checkpoint_path , enable_fusion=False ):
    '''simple docstring'''
    model, model_cfg = create_model(
        """HTSAT-tiny""" , """roberta""" , checkpoint_path , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=enable_fusion , fusion_type="""aff_2d""" if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict(state_dict ):
    '''simple docstring'''
    model_state_dict = {}
    sequential_layers_pattern = r'''.*sequential.(\d+).*'''
    text_projection_pattern = r'''.*_projection.(\d+).*'''
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(sequential_layer )//3}.linear.""" )
        elif re.match(text_projection_pattern , key ):
            projection_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(F"""_projection.{projection_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" )
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("""qkv""" , """query""" )] = query_layer
            model_state_dict[key.replace("""qkv""" , """key""" )] = key_layer
            model_state_dict[key.replace("""qkv""" , """value""" )] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
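# A minimal sketch of the fused-QKV split performed above: a stacked
# (3 * dim, ...) tensor is cut into equal query/key/value chunks along dim 0.
_demo_qkv = torch.arange(12.0).reshape(6, 2)  # hypothetical fused weight
_demo_dim = _demo_qkv.size(0) // 3
assert _demo_qkv[:_demo_dim].shape == _demo_qkv[_demo_dim : 2 * _demo_dim].shape == (2, 2)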
def convert_clap_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
    '''simple docstring'''
    clap_model, model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 361
|
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class UpperCamelCase ( snake_case_ ):
    def __init__( self , parent , config_class=None , has_text_modality=True , common_properties=None , **kwargs ) -> int:
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
def _lowercase ( self : int ) -> Tuple:
_a : List[str] = self.config_class(**self.inputs_dict )
_a : Dict = (
["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["""vocab_size"""] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) , msg=f"""`{prop}` does not exist""" )
# Test that config has the common properties as setter
for idx, name in enumerate(UpperCAmelCase__ ):
try:
setattr(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
self.parent.assertEqual(
getattr(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ , msg=f"""`{name} value {idx} expected, but was {getattr(UpperCAmelCase__ , UpperCAmelCase__ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(UpperCAmelCase__ ):
try:
_a : Optional[int] = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ , msg=f"""`{name} value {idx} expected, but was {getattr(UpperCAmelCase__ , UpperCAmelCase__ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
_a : Optional[Any] = self.config_class(**self.inputs_dict )
_a : List[str] = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , UpperCAmelCase__ )
def _lowercase ( self : int ) -> List[str]:
_a : Optional[Any] = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Tuple = os.path.join(UpperCAmelCase__ , """config.json""" )
config_first.to_json_file(UpperCAmelCase__ )
_a : List[str] = self.config_class.from_json_file(UpperCAmelCase__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _lowercase ( self : Union[str, Any] ) -> Dict:
_a : Dict = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(UpperCAmelCase__ )
_a : Dict = self.config_class.from_pretrained(UpperCAmelCase__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _lowercase ( self : Dict ) -> Tuple:
_a : List[Any] = self.config_class(**self.inputs_dict )
_a : Any = """test"""
with tempfile.TemporaryDirectory() as tmpdirname:
_a : List[Any] = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
config_first.save_pretrained(UpperCAmelCase__ )
_a : List[Any] = self.config_class.from_pretrained(UpperCAmelCase__ , subfolder=UpperCAmelCase__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _lowercase ( self : List[str] ) -> Union[str, Any]:
_a : Tuple = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
_a : Union[str, Any] = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def _lowercase ( self : Tuple ) -> List[str]:
if self.config_class.is_composition:
return
_a : str = self.config_class()
self.parent.assertIsNotNone(UpperCAmelCase__ )
def _lowercase ( self : List[Any] ) -> Optional[Any]:
_a : Dict = copy.deepcopy(UpperCAmelCase__ )
_a : Any = self.config_class(**UpperCAmelCase__ )
_a : str = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) )
elif getattr(UpperCAmelCase__ , UpperCAmelCase__ ) != value:
wrong_values.append((key, getattr(UpperCAmelCase__ , UpperCAmelCase__ ), value) )
if len(UpperCAmelCase__ ) > 0:
_a : List[Any] = """\n""".join([f"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
raise ValueError(f"""The following keys were not properly set in the config:\n{errors}""" )
def _lowercase ( self : int ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 324
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 362
|
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
_snake_case = 'config.json'
_snake_case = 'diffusion_pytorch_model.bin'
_snake_case = 'diffusion_flax_model.msgpack'
_snake_case = 'model.onnx'
_snake_case = 'diffusion_pytorch_model.safetensors'
_snake_case = 'weights.pb'
_snake_case = 'https://huggingface.co'
_snake_case = default_cache_path
_snake_case = 'diffusers_modules'
_snake_case = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
_snake_case = ['fp16', 'non-ema']
_snake_case = '.self_attn'
| 324
| 0
|
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json'
with io.open(filename, 'r', encoding='utf-8') as f:
    bleu_data = json.load(f)
@require_torch
class UpperCamelCase ( unittest.TestCase ):
    def get_tokenizer( self , mname ) -> Optional[Any]:
        return FSMTTokenizer.from_pretrained(mname )
    def get_model( self , mname ) -> List[str]:
        model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["""en-ru""", 2_6.0],
["""ru-en""", 2_2.0],
["""en-de""", 2_2.0],
["""de-en""", 2_9.0],
] )
@slow
    def test_bleu_scores( self , pair , min_bleu_score ) -> List[Any]:
        mname = f"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]["""src"""]
        tgt_sentences = bleu_data[pair]["""tgt"""]
        batch = tokenizer(src_sentences , return_tensors="""pt""" , truncation=True , padding="""longest""" ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores["""bleu"""] , min_bleu_score )
| 363
|
"""simple docstring"""
from math import factorial
def binomial_distribution(successes , trials , prob ):
    '''simple docstring'''
    if successes > trials:
        raise ValueError("""successes must be less than or equal to trials""" )
    if trials < 0 or successes < 0:
        raise ValueError("""the function is defined for non-negative integers""" )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError("""the function is defined for non-negative integers""" )
    if not 0 < prob < 1:
        raise ValueError("""prob has to be in the range (0, 1)""" )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('Probability of 2 successes out of 4 trials')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
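    # Hand-check: C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375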
| 324
| 0
|
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_snake_case = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class OwlViTTextConfig( PretrainedConfig ):
    model_type = """owlvit_text_model"""
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Dict=49408 , UpperCAmelCase__ : str=512 , UpperCAmelCase__ : List[Any]=2048 , UpperCAmelCase__ : Optional[int]=12 , UpperCAmelCase__ : List[Any]=8 , UpperCAmelCase__ : List[str]=16 , UpperCAmelCase__ : Optional[Any]="quick_gelu" , UpperCAmelCase__ : Optional[int]=1E-5 , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : Optional[int]=0.0_2 , UpperCAmelCase__ : List[Any]=1.0 , UpperCAmelCase__ : List[Any]=0 , UpperCAmelCase__ : List[str]=49406 , UpperCAmelCase__ : str=49407 , **UpperCAmelCase__ : Union[str, Any] , ) -> str:
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
_a : List[str] = vocab_size
_a : str = hidden_size
_a : List[Any] = intermediate_size
_a : Optional[int] = num_hidden_layers
_a : str = num_attention_heads
_a : Tuple = max_position_embeddings
_a : Dict = hidden_act
_a : List[str] = layer_norm_eps
_a : str = attention_dropout
_a : Any = initializer_range
_a : int = initializer_factor
@classmethod
def _lowercase ( cls : List[Any] , UpperCAmelCase__ : int , **UpperCAmelCase__ : Any ) -> Any:
cls._set_token_in_kwargs(_A )
_a , _a : List[str] = cls.get_config_dict(_A , **_A )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
_a : List[str] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_A , **_A )
class OwlViTVisionConfig( PretrainedConfig ):
    model_type = """owlvit_vision_model"""
def __init__( self : Tuple , UpperCAmelCase__ : List[Any]=768 , UpperCAmelCase__ : List[Any]=3072 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : Tuple=3 , UpperCAmelCase__ : str=768 , UpperCAmelCase__ : Dict=32 , UpperCAmelCase__ : str="quick_gelu" , UpperCAmelCase__ : Optional[int]=1E-5 , UpperCAmelCase__ : Optional[int]=0.0 , UpperCAmelCase__ : int=0.0_2 , UpperCAmelCase__ : Tuple=1.0 , **UpperCAmelCase__ : Union[str, Any] , ) -> Optional[int]:
super().__init__(**_A )
_a : int = hidden_size
_a : List[Any] = intermediate_size
_a : List[Any] = num_hidden_layers
_a : Optional[int] = num_attention_heads
_a : Union[str, Any] = num_channels
_a : Dict = image_size
_a : Optional[int] = patch_size
_a : Optional[int] = hidden_act
_a : Optional[int] = layer_norm_eps
_a : List[str] = attention_dropout
_a : Any = initializer_range
_a : List[Any] = initializer_factor
@classmethod
def _lowercase ( cls : List[Any] , UpperCAmelCase__ : Any , **UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
cls._set_token_in_kwargs(_A )
_a , _a : str = cls.get_config_dict(_A , **_A )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
_a : Optional[int] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_A , **_A )
class OwlViTConfig( PretrainedConfig ):
    model_type = """owlvit"""
    is_composition = True
def __init__( self : int , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[str]=512 , UpperCAmelCase__ : Optional[int]=2.6_5_9_2 , UpperCAmelCase__ : Dict=True , **UpperCAmelCase__ : Optional[int] , ) -> Tuple:
super().__init__(**_A )
if text_config is None:
_a : List[Any] = {}
logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
if vision_config is None:
_a : Dict = {}
logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )
_a : str = OwlViTTextConfig(**_A )
_a : Union[str, Any] = OwlViTVisionConfig(**_A )
_a : Optional[Any] = projection_dim
_a : List[Any] = logit_scale_init_value
_a : List[str] = return_dict
_a : Optional[Any] = 1.0
@classmethod
def _lowercase ( cls : List[str] , UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Dict ) -> Optional[Any]:
cls._set_token_in_kwargs(_A )
_a , _a : Optional[Any] = cls.get_config_dict(_A , **_A )
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_A , **_A )
@classmethod
def _lowercase ( cls : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , **UpperCAmelCase__ : Dict ) -> Dict:
_a : List[Any] = {}
_a : Optional[int] = text_config
_a : Optional[int] = vision_config
return cls.from_dict(_A , **_A )
def _lowercase ( self : Dict ) -> Dict:
_a : int = copy.deepcopy(self.__dict__ )
_a : List[str] = self.text_config.to_dict()
_a : Any = self.vision_config.to_dict()
_a : Any = self.__class__.model_type
return output
class OwlViTOnnxConfig( OnnxConfig ):
@property
def _lowercase ( self : List[str] ) -> str:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
] )
@property
def _lowercase ( self : Dict ) -> Tuple:
return OrderedDict(
[
("""logits_per_image""", {0: """batch"""}),
("""logits_per_text""", {0: """batch"""}),
("""text_embeds""", {0: """batch"""}),
("""image_embeds""", {0: """batch"""}),
] )
@property
def _lowercase ( self : int ) -> Optional[Any]:
return 1E-4
def _lowercase ( self : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : str = -1 , UpperCAmelCase__ : Optional[int] = -1 , UpperCAmelCase__ : List[Any] = None , ) -> Union[str, Any]:
_a : Dict = super().generate_dummy_inputs(
processor.tokenizer , batch_size=_A , seq_length=_A , framework=_A )
_a : List[Any] = super().generate_dummy_inputs(
processor.image_processor , batch_size=_A , framework=_A )
return {**text_input_dict, **image_input_dict}
@property
def _lowercase ( self : str ) -> Tuple:
return 14
| 364
|
"""simple docstring"""
def depth_first_search(grid , row , col , visit ):
    '''simple docstring'''
    row_length, col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )
    visit.remove((row, col) )
    return count
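# A small usage sketch: on a fully open 2x2 grid there are exactly two simple
# paths from the top-left to the bottom-right corner:
# depth_first_search([[0, 0], [0, 0]], 0, 0, set())  -> 2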
if __name__ == "__main__":
import doctest
doctest.testmod()
| 324
| 0
|
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path , checkpoint_path , LORA_PREFIX_UNET , LORA_PREFIX_TEXT_ENCODER , alpha ):
    '''simple docstring'''
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path , torch_dtype=torch.float32 )
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path )
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0 )
        while len(layer_infos ) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name )
                if len(layer_infos ) > 0:
                    temp_name = layer_infos.pop(0 )
                elif len(layer_infos ) == 0:
                    break
            except Exception:
                if len(temp_name ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    temp_name = layer_infos.pop(0 )
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
            pair_keys.append(key )
        else:
            pair_keys.append(key )
            pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
        # update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down ).unsqueeze(2 ).unsqueeze(3 )
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down )
        # update visited list
        for item in pair_keys:
            visited.append(item )
    return pipeline
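# A shape sketch of the merge rule applied above: with weight_up of shape
# (out, r) and weight_down of shape (r, in),
#     W <- W + alpha * (weight_up @ weight_down)
# e.g. a (4, 2) up times a (2, 3) down yields a (4, 3) delta matching W.
# The 1x1-conv case squeezes the trailing kernel dims before the matmul and
# unsqueezes them back afterwards.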
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    args = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha
    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 365
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_snake_case = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 324
| 0
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences , padding_index , padding_side , sequence_length ):
    '''simple docstring'''
    if isinstance(padding_index , tuple ):
        out_tensor = np.full((len(sequences ), sequence_length, 2) , padding_index )
    else:
        out_tensor = np.full((len(sequences ), sequence_length) , padding_index )
    for i, tensor in enumerate(sequences ):
        if padding_side == "right":
            if isinstance(padding_index , tuple ):
                out_tensor[i, : len(tensor ), :] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor )] = tensor[:sequence_length]
        else:
            if isinstance(padding_index , tuple ):
                out_tensor[i, len(tensor ) :, :] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor ) :] = tensor[:sequence_length]
    return out_tensor.tolist()
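# A small worked example of the padding helper above (int padding_index,
# right-side padding):
# padding_tensor([[1, 2], [3]], -1, "right", 3)  -> [[1, 2, -1], [3, -1, -1]]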
def is_punctuation(char ):
    '''simple docstring'''
    cp = ord(char )
    if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6):
        return True
    cat = unicodedata.category(char )
    if cat.startswith("""P""" ):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification( DataCollatorMixin ):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = """pt"""
    def torch_call( self , features ) -> Any:
        import torch
        label_name = """label""" if """label""" in features[0].keys() else """labels"""
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" if labels is None else None , )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["""entity_ids"""] ).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
            ]
        ner_tags = [feature["""ner_tags"""] for feature in features]
        batch["""ner_tags"""] = padding_tensor(ner_tags , -1 , padding_side , sequence_length )
        original_entity_spans = [feature["""original_entity_spans"""] for feature in features]
        batch["""original_entity_spans"""] = padding_tensor(original_entity_spans , (-1, -1) , padding_side , sequence_length )
        batch = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
        return batch
| 366
|
"""simple docstring"""
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__( self , pos_x , pos_y , goal_x , goal_y , parent ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        """Return the walkable neighbours of `parent` on the module-level `grid`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent))
        return successors
    def retrace_path(self, node: Node | None) -> Path:
        """Walk the parent links back to the start and return the path in order."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal) -> None:
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            # retarget each search towards the frontier of the other one
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
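# Performance note (a sketch, not part of the original script): `list.pop(0)`
# is O(n), so on large grids a `collections.deque` keeps each dequeue O(1):
#
#     from collections import deque
#
#     self.node_queue = deque([self.start])   # in __init__
#     current_node = self.node_queue.popleft()  # in search()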
| 324
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(self, image: np.ndarray, size: Dict[str, int], resample=PIL.Image.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def flip_channel_order(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
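# Minimal usage sketch (assumes PIL and an RGB image on disk; the file name is
# illustrative):
#
#     from PIL import Image
#
#     image_processor = MobileViTImageProcessor()
#     image = Image.open("cat.png").convert("RGB")
#     inputs = image_processor(image, return_tensors="pt")
#     print(inputs["pixel_values"].shape)  # (1, 3, 256, 256) with the defaults above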
| 367
|
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input train ref data file for whole word mask in Chinese."}
    )
    eval_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."}
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in block of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(args: DataTrainingArguments, tokenizer: PreTrainedTokenizer, evaluate: bool = False, cache_dir: Optional[str] = None):
    """Build the train or eval dataset described by `args`."""

    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
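# Minimal sketch (hypothetical file and tokenizer): build a line-by-line MLM
# training dataset.
#
#     args = DataTrainingArguments(train_data_file="train.txt", line_by_line=True, mlm=True, block_size=128)
#     dataset = get_dataset(args, tokenizer=tokenizer)  # tokenizer: any PreTrainedTokenizer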
def main():
    """Train and/or evaluate a language model; returns a dict of eval results."""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , UpperCamelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name"
        )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)
    model.resize_token_embeddings(len(tokenizer))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)
    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer, plm_probability=data_args.plm_probability, max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, prediction_loss_only=True,
    )
# Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}
        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))
        results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 324
| 0
|
"""simple docstring"""
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '\nHuman: <<task>>\n\nAssistant: '
DEFAULT_PROMPTS_REPO = 'huggingface-tools/default-prompts'
PROMPT_FILES = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="run" ):
'''simple docstring'''
if prompt_or_repo_id is None:
_a : Optional[int] = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("""\\s""" , snake_case_ ) is not None:
return prompt_or_repo_id
_a : Tuple = cached_file(
snake_case_ , PROMPT_FILES[mode] , repo_type="""dataset""" , user_agent={"""agent""": agent_name} )
with open(snake_case_ , """r""" , encoding="""utf-8""" ) as f:
return f.read()
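# Minimal usage sketch ("my-agent" is an illustrative agent name):
#
#     download_prompt(None, "my-agent")                # fetches the default "run" prompt template
#     download_prompt("Do <<task>> now", "my-agent")   # contains whitespace -> returned as-is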
| 368
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key(k):
    """Map a TF Pegasus state-dict key to its Hugging Face (Bart-style) equivalent."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
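# Worked example (patterns applied top to bottom):
#     rename_state_dict_key("model/encoder/ffn.dense.kernel")
#     # '/' -> '.'            : "model.encoder.ffn.dense.kernel"
#     # 'ffn.dense.' -> 'fc1.': "model.encoder.fc1.kernel"
#     # 'kernel' -> 'weight'  : "model.encoder.fc1.weight"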
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """Build a PyTorch Pegasus model and load the converted TF weights into it."""
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], f"""no matches found for the following tf keys {extra}"""
    return torch_model
def lowerCAmelCase__ ( UpperCamelCase__="./ckpt/aeslc/model.ckpt-32000" ):
'''simple docstring'''
_a : List[Any] = tf.train.list_variables(UpperCamelCase__ )
_a : Optional[int] = {}
_a : Dict = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(UpperCamelCase__ , desc="""converting tf checkpoint to dict""" ):
_a : Optional[Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
_a : str = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
_a : int = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"""summarization_{dataset}"""]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"""summarization_{dataset}"""]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
    parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('pegasus', dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
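# Example invocation (script name and paths are illustrative):
#
#     python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc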
| 324
| 0
|
"""simple docstring"""
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for `bit_count` bits as integers."""
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence
def gray_code_sequence_string(bit_count: int) -> list:
    """Recursively build the Gray code sequence for `bit_count` bits as binary strings."""
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence
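# Worked example:
#     gray_code_sequence_string(2) -> ["00", "01", "11", "10"]
#     gray_code(2)                 -> [0, 1, 3, 2]
# Consecutive values differ in exactly one bit, including the wrap-around (2 -> 0).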
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_default(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
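# To exercise the @slow / @require_torch_gpu class above (the test path and -k
# filter are illustrative; RUN_SLOW=1 is what un-skips @slow tests):
#
#     RUN_SLOW=1 pytest tests/pipelines/stable_diffusion_xl/ -k "img2img"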
| 324
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
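# Minimal usage sketch: a smaller-than-default MAE configuration.
#
#     config = ViTMAEConfig(hidden_size=384, num_hidden_layers=6, mask_ratio=0.6)
#     # round-trips through the standard config serialization:
#     config == ViTMAEConfig.from_dict(config.to_dict())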
| 370
|
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # record only leaf modules, plus convs / batch norms (which carry weights)
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """Transfer the weights of `self.src` to `self.dest` by tracing one forward pass with input `x`."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"""Numbers of operations are different. Source module has {len(src_traced)} operations while"""
                f""" destination module has {len(dest_traced)}."""
            )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"""Transfered from={src_m} to={dest_m}""")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    """Copy the weights of the timm model `name` into our ResNet and optionally push to the Hub."""
    print(f"""Converting {name}...""")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."
    checkpoint_name = f"""resnet{'-'.join(name.split('resnet'))}"""
    print(checkpoint_name)
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True, )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor", use_temp_dir=True, )
        print(f"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 324
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log the metrics for `split` and save them to `{output_dir}/{split}_results.json`."""
    logger.info(f"""***** {split} metrics *****""")
    for key in sorted(metrics.keys()):
        logger.info(f"""  {key} = {metrics[key]}""")
    save_json(metrics, os.path.join(output_dir, f"""{split}_results.json"""))
def main():
    """Fine-tune a seq2seq model; returns a dict with all computed metrics."""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED), training_args.fp16, )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , __lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config, p, getattr(training_args, p))
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf=".ckpt" in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, )
    # use task specific params
    use_task_specific_params(model, data_args.task)
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)
    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = Seq2SeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)
        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))
    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))
    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 371
|
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 324
| 0
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : Dict ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase ( self : List[str] ) -> Dict:
_a : Tuple = 1
_a : Optional[Any] = 3
_a : Optional[int] = (32, 32)
_a : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase__ )
return image
@property
def _lowercase ( self : int ) -> Dict:
torch.manual_seed(0 )
_a : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def _lowercase ( self : Union[str, Any] ) -> int:
torch.manual_seed(0 )
_a : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def _lowercase ( self : Optional[int] ) -> int:
torch.manual_seed(0 )
_a : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(UpperCAmelCase__ )
@property
def _lowercase ( self : Dict ) -> Optional[Any]:
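        # a stand-in feature extractor: calling it returns an object whose
        # ``pixel_values`` is an empty tensor and whose ``.to(device)`` is
        # effectively a no-op returning itself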
def extract(*UpperCAmelCase__ : int , **UpperCAmelCase__ : str ):
class UpperCamelCase :
def __init__( self : Any ) -> int:
_a : List[Any] = torch.ones([0] )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : List[str] ) -> str:
self.pixel_values.to(UpperCAmelCase__ )
return self
return Out()
return extract
def _lowercase ( self : Any ) -> List[Any]:
_a : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
_a : Union[str, Any] = self.dummy_cond_unet
_a : Optional[int] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , )
_a : List[str] = self.dummy_vae
_a : Tuple = self.dummy_text_encoder
_a : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_a : int = StableDiffusionPipeline(
unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , vae=UpperCAmelCase__ , text_encoder=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ , feature_extractor=self.dummy_extractor , )
_a : Optional[Any] = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : int = """A painting of a squirrel eating a burger"""
_a : List[str] = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
_a : Optional[Any] = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
_a : Any = output.images
_a : Tuple = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
_a : Tuple = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=UpperCAmelCase__ , )[0]
_a : Optional[Any] = image[0, -3:, -3:, -1]
_a : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a : Any = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase ( self : Any ) -> List[str]:
_a : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
_a : Tuple = self.dummy_cond_unet
_a : str = PNDMScheduler(skip_prk_steps=UpperCAmelCase__ )
_a : int = self.dummy_vae
_a : Optional[Any] = self.dummy_text_encoder
_a : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_a : Any = StableDiffusionPipeline(
unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , vae=UpperCAmelCase__ , text_encoder=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ , feature_extractor=self.dummy_extractor , )
_a : Any = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : Union[str, Any] = """A painting of a squirrel eating a burger"""
_a : str = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
_a : Optional[Any] = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
_a : Dict = output.images
_a : int = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
_a : int = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=UpperCAmelCase__ , )[0]
_a : List[Any] = image[0, -3:, -3:, -1]
_a : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a : int = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase ( self : int ) -> Dict:
_a : Any = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=UpperCAmelCase__ )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert isinstance(pipe.scheduler , UpperCAmelCase__ )
assert pipe.safety_checker is None
_a : Any = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase__ )
_a : Union[str, Any] = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_a : Optional[int] = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _lowercase ( self : List[str] ) -> List[str]:
_a : Union[str, Any] = self.dummy_cond_unet
_a : List[Any] = PNDMScheduler(skip_prk_steps=UpperCAmelCase__ )
_a : List[str] = self.dummy_vae
_a : Optional[Any] = self.dummy_text_encoder
_a : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
_a : Any = unet.half()
_a : List[Any] = vae.half()
_a : List[Any] = bert.half()
# make sure here that pndm scheduler skips prk
_a : Optional[int] = StableDiffusionPipeline(
unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , vae=UpperCAmelCase__ , text_encoder=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ , feature_extractor=self.dummy_extractor , )
_a : Tuple = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : str = """A painting of a squirrel eating a burger"""
_a : Union[str, Any] = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : List[Any] ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : Tuple ) -> Dict:
_a : Dict = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=UpperCAmelCase__ )
_a : Optional[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_a : Dict = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : List[str] = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
_a : Optional[int] = 4003660346
_a : int = 7
# without safety guidance (sld_guidance_scale = 0)
_a : List[Any] = torch.manual_seed(UpperCAmelCase__ )
_a : List[Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
_a : Optional[int] = output.images
_a : List[str] = image[0, -3:, -3:, -1]
_a : int = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
_a : Optional[int] = torch.manual_seed(UpperCAmelCase__ )
_a : List[Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_a : Union[str, Any] = output.images
_a : int = image[0, -3:, -3:, -1]
_a : List[str] = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase ( self : int ) -> Tuple:
_a : Optional[int] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=UpperCAmelCase__ )
_a : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_a : int = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : Optional[int] = """padme amidala taking a bath artwork, safe for work, no nudity"""
_a : List[Any] = 2734971755
_a : Any = 7
_a : str = torch.manual_seed(UpperCAmelCase__ )
_a : Optional[int] = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
_a : Tuple = output.images
_a : str = image[0, -3:, -3:, -1]
_a : List[str] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
_a : List[str] = torch.manual_seed(UpperCAmelCase__ )
_a : List[str] = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_a : List[Any] = output.images
_a : List[str] = image[0, -3:, -3:, -1]
_a : int = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase ( self : Union[str, Any] ) -> List[Any]:
_a : str = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
_a : Optional[int] = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : Union[str, Any] = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
_a : Tuple = 1044355234
_a : Any = 12
_a : int = torch.manual_seed(UpperCAmelCase__ )
_a : Union[str, Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
_a : Optional[int] = output.images
_a : Union[str, Any] = image[0, -3:, -3:, -1]
_a : List[str] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
_a : str = torch.manual_seed(UpperCAmelCase__ )
_a : List[str] = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=UpperCAmelCase__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_a : Dict = output.images
_a : Union[str, Any] = image[0, -3:, -3:, -1]
_a : List[Any] = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 350
|
"""simple docstring"""
_snake_case = 8.31_44_62 # universal gas constant, unit: J mol^-1 K^-1
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
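# A minimal worked example of the two helpers above (which, before the
# identifiers were rewritten, were presumably named along the lines of
# ``pressure_of_gas_system`` and ``volume_of_gas_system``; note the second
# ``def`` currently shadows the first). Both rearrange the ideal gas law
# PV = nRT:
#   P = n * R * T / V: 2 mol at 100 K in 0.03 m^3 gives
#   2 * 100 * 8.314462 / 0.03 ≈ 55429.75 Pa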
if __name__ == "__main__":
from doctest import testmod
testmod()
| 324
| 0
|
"""simple docstring"""
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
_snake_case = True
from torch.cuda.amp import autocast
_snake_case = logging.getLogger(__name__)
def lowerCAmelCase__ ( UpperCamelCase__=None , UpperCamelCase__=None ):
'''simple docstring'''
return field(default_factory=lambda: default , metadata=UpperCamelCase__ )
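# e.g. ``list_field(default=[","], metadata={"help": "..."})`` builds a
# dataclass field whose mutable list default is created per instance via
# ``default_factory``, avoiding the shared-mutable-default pitfall.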
@dataclass
class UpperCamelCase :
UpperCamelCase : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase : Optional[bool] = field(
default=snake_case_ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
UpperCamelCase : Optional[float] = field(
default=0.1 , metadata={'''help''': '''The dropout ratio for the attention probabilities.'''} )
UpperCamelCase : Optional[float] = field(
default=0.1 , metadata={'''help''': '''The dropout ratio for activations inside the fully connected layer.'''} )
UpperCamelCase : Optional[float] = field(
default=0.1 , metadata={
        '''help''': '''The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.'''
} , )
UpperCamelCase : Optional[float] = field(
        default=0.1 , metadata={'''help''': '''The dropout probability for all 1D convolutional layers in feature extractor.'''} , )
UpperCamelCase : Optional[float] = field(
default=0.0_5 , metadata={
'''help''': (
            '''Probability of each feature vector along the time axis to be chosen as the start of the vector'''
'''span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature'''
'''vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'''
)
} , )
UpperCamelCase : Optional[float] = field(default=0.0 , metadata={'''help''': '''The LayerDrop probability.'''} )
@dataclass
class UpperCamelCase :
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCamelCase : Optional[str] = field(
default='''train+validation''' , metadata={
            '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train+validation\''''
} , )
UpperCamelCase : bool = field(
default=snake_case_ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
UpperCamelCase : Optional[int] = field(
default=snake_case_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCamelCase : Optional[int] = field(
default=snake_case_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase : Optional[int] = field(
default=snake_case_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of validation examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase : List[str] = list_field(
default=[''',''', '''?''', '''.''', '''!''', '''-''', ''';''', ''':''', '''""''', '''%''', '''\'''', '''"''', '''�'''] , metadata={'''help''': '''A list of characters to remove from the transcripts.'''} , )
@dataclass
class UpperCamelCase :
UpperCamelCase : WavaVecaProcessor
UpperCamelCase : Union[bool, str] = True
UpperCamelCase : Optional[int] = None
UpperCamelCase : Optional[int] = None
UpperCamelCase : Optional[int] = None
UpperCamelCase : Optional[int] = None
def __call__( self : List[Any] , UpperCAmelCase__ : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
# different padding methods
_a : Union[str, Any] = [{"""input_values""": feature["""input_values"""]} for feature in features]
_a : str = [{"""input_ids""": feature["""labels"""]} for feature in features]
_a : Optional[Any] = self.processor.pad(
UpperCAmelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
_a : List[str] = self.processor.pad(
labels=UpperCAmelCase__ , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="""pt""" , )
# replace padding with -100 to ignore loss correctly
_a : Union[str, Any] = labels_batch["""input_ids"""].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
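        # e.g. with pad_token_id = 0: padded label ids [[5, 6, 0, 0]] and
        # attention_mask [[1, 1, 0, 0]] become [[5, 6, -100, -100]], so the
        # loss skips the padded positions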
_a : Any = labels
return batch
class UpperCamelCase ( snake_case_ ):
def _lowercase ( self : Any , UpperCAmelCase__ : nn.Module , UpperCAmelCase__ : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
model.train()
_a : Optional[Any] = self._prepare_inputs(UpperCAmelCase__ )
if self.use_amp:
with autocast():
_a : Optional[int] = self.compute_loss(UpperCAmelCase__ , UpperCAmelCase__ )
else:
_a : Union[str, Any] = self.compute_loss(UpperCAmelCase__ , UpperCAmelCase__ )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
_a : List[str] = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_a : List[Any] = loss.sum() / (inputs["""labels"""] >= 0).sum()
else:
raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
_a : Any = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(UpperCAmelCase__ ).backward()
elif self.use_apex:
with amp.scale_loss(UpperCAmelCase__ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(UpperCAmelCase__ )
else:
loss.backward()
return loss.detach()
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_a : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_a : List[str] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_a : Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_a : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , UpperCamelCase__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
_a : str = datasets.load_dataset(
"""common_voice""" , data_args.dataset_config_name , split=data_args.train_split_name )
_a : int = datasets.load_dataset("""common_voice""" , data_args.dataset_config_name , split="""test""" )
# Create and save tokenizer
_a : str = F"""[{''.join(data_args.chars_to_ignore )}]"""
def remove_special_characters(UpperCamelCase__ ):
_a : int = re.sub(UpperCamelCase__ , """""" , batch["""sentence"""] ).lower() + """ """
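        # e.g. "Bonjour, toi!" -> "bonjour toi " (ignored characters stripped,
        # lower-cased, trailing space appended)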
return batch
_a : List[Any] = train_dataset.map(UpperCamelCase__ , remove_columns=["""sentence"""] )
_a : Union[str, Any] = eval_dataset.map(UpperCamelCase__ , remove_columns=["""sentence"""] )
def extract_all_chars(UpperCamelCase__ ):
_a : Any = """ """.join(batch["""text"""] )
_a : List[Any] = list(set(UpperCamelCase__ ) )
return {"vocab": [vocab], "all_text": [all_text]}
_a : Optional[Any] = train_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , batch_size=-1 , keep_in_memory=UpperCamelCase__ , remove_columns=train_dataset.column_names , )
_a : Tuple = train_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , batch_size=-1 , keep_in_memory=UpperCamelCase__ , remove_columns=eval_dataset.column_names , )
_a : Tuple = list(set(vocab_train["""vocab"""][0] ) | set(vocab_test["""vocab"""][0] ) )
_a : str = {v: k for k, v in enumerate(UpperCamelCase__ )}
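    # In the original script the next lines remap the space character to the
    # word-delimiter token "|" and append [UNK]/[PAD] ids at the end of the
    # vocabulary; the assignment targets were lost when identifiers were
    # rewritten to ``_a``.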
_a : Dict = vocab_dict[""" """]
del vocab_dict[" "]
_a : Any = len(UpperCamelCase__ )
_a : Union[str, Any] = len(UpperCamelCase__ )
with open("""vocab.json""" , """w""" ) as vocab_file:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_a : Union[str, Any] = WavaVecaCTCTokenizer(
"""vocab.json""" , unk_token="""[UNK]""" , pad_token="""[PAD]""" , word_delimiter_token="""|""" , )
_a : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0.0 , do_normalize=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ )
_a : Dict = WavaVecaProcessor(feature_extractor=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
_a : Any = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="""mean""" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
_a : Any = min(len(UpperCamelCase__ ) , data_args.max_train_samples )
_a : Optional[int] = train_dataset.select(range(UpperCamelCase__ ) )
if data_args.max_val_samples is not None:
_a : Dict = eval_dataset.select(range(data_args.max_val_samples ) )
_a : List[Any] = torchaudio.transforms.Resample(4_8_0_0_0 , 1_6_0_0_0 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(UpperCamelCase__ ):
_a : str = torchaudio.load(batch["""path"""] )
_a : Tuple = resampler(UpperCamelCase__ ).squeeze().numpy()
_a : List[Any] = 1_6_0_0_0
_a : Optional[int] = batch["""text"""]
return batch
_a : Tuple = train_dataset.map(
UpperCamelCase__ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
_a : int = eval_dataset.map(
UpperCamelCase__ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(UpperCamelCase__ ):
# check that all files have the correct sampling rate
assert (
len(set(batch["""sampling_rate"""] ) ) == 1
), F"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
_a : str = processor(
audio=batch["""speech"""] , text=batch["""target_text"""] , sampling_rate=batch["""sampling_rate"""][0] )
batch.update(UpperCamelCase__ )
return batch
_a : Optional[int] = train_dataset.map(
UpperCamelCase__ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , )
_a : str = eval_dataset.map(
UpperCamelCase__ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , )
# Metric
_a : str = datasets.load_metric("""wer""" )
def compute_metrics(UpperCamelCase__ ):
_a : List[Any] = pred.predictions
_a : int = np.argmax(UpperCamelCase__ , axis=-1 )
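        # presumably this restored pad ids where the labels had been masked with
        # -100 (``pred.label_ids[pred.label_ids == -100] = pad_token_id``); the
        # assignment target was lost in the identifier rewrite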
_a : Any = processor.tokenizer.pad_token_id
_a : List[Any] = processor.batch_decode(UpperCamelCase__ )
# we do not want to group tokens when computing the metrics
_a : int = processor.batch_decode(pred.label_ids , group_tokens=UpperCamelCase__ )
_a : Any = wer_metric.compute(predictions=UpperCamelCase__ , references=UpperCamelCase__ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
_a : Optional[Any] = DataCollatorCTCWithPadding(processor=UpperCamelCase__ , padding=UpperCamelCase__ )
# Initialize our Trainer
_a : Any = CTCTrainer(
model=UpperCamelCase__ , data_collator=UpperCamelCase__ , args=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_a : Any = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
_a : Tuple = model_args.model_name_or_path
else:
_a : List[str] = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
_a : Optional[Any] = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
trainer.save_model()
_a : Optional[int] = train_result.metrics
_a : List[str] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ )
)
_a : Optional[int] = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics("""train""" , UpperCamelCase__ )
trainer.save_metrics("""train""" , UpperCamelCase__ )
trainer.save_state()
# Evaluation
_a : List[Any] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_a : str = trainer.evaluate()
_a : Optional[int] = data_args.max_val_samples if data_args.max_val_samples is not None else len(UpperCamelCase__ )
_a : Dict = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics("""eval""" , UpperCamelCase__ )
trainer.save_metrics("""eval""" , UpperCamelCase__ )
return results
if __name__ == "__main__":
main()
| 351
|
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_snake_case = logging.getLogger(__name__)
_snake_case = 'pytorch_model.bin'
@dataclasses.dataclass
class UpperCamelCase :
UpperCamelCase : str = dataclasses.field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co.'''} , )
@dataclasses.dataclass
class UpperCamelCase :
UpperCamelCase : str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the training data.'''} )
UpperCamelCase : str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the data to predict on.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''The name of the task to train on.'''} , )
UpperCamelCase : Optional[List[str]] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''The list of labels for the task.'''} )
@dataclasses.dataclass
class UpperCamelCase :
UpperCamelCase : str = dataclasses.field(
metadata={'''help''': '''The output directory where the model predictions and checkpoints will be written.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default='''accuracy''' , metadata={'''help''': '''The evaluation metric used for the task.'''} )
UpperCamelCase : Optional[str] = dataclasses.field(
default='''no''' , metadata={
            '''help''': '''The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'''
} , )
UpperCamelCase : Optional[int] = dataclasses.field(
default=10 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , )
UpperCamelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={
'''help''': '''How much the specified evaluation metric must improve to satisfy early stopping conditions.'''
} , )
UpperCamelCase : Optional[bool] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the confidence score.'''} , )
UpperCamelCase : Optional[bool] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the validation performance.'''} , )
UpperCamelCase : Optional[bool] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Whether to fine-tune on labeled data after pseudo training.'''} , )
UpperCamelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={'''help''': '''Confidence threshold for pseudo-labeled data filtering.'''} , )
UpperCamelCase : Optional[int] = dataclasses.field(
        default=100 , metadata={'''help''': '''Maximum number of self-training iterations to run.'''} , )
UpperCamelCase : Optional[int] = dataclasses.field(
default=snake_case_ , metadata={'''help''': '''Random seed for initialization.'''} , )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Optional[int] = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
_a : Union[str, Any] = dataset.filter(lambda UpperCamelCase__ : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
_a : Any = int(eval_result * len(UpperCamelCase__ ) )
print(UpperCamelCase__ )
_a : str = dataset.sort("""probability""" , reverse=UpperCamelCase__ )
_a : Any = dataset.select(range(UpperCamelCase__ ) )
_a : Tuple = dataset.remove_columns(["""label""", """probability"""] )
_a : Optional[Any] = dataset.rename_column("""prediction""" , """label""" )
_a : Dict = dataset.map(lambda UpperCamelCase__ : {"label": idalabel[example["label"]]} )
_a : Union[str, Any] = dataset.shuffle(seed=args.seed )
_a : Optional[int] = os.path.join(UpperCamelCase__ , F"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(UpperCamelCase__ , index=UpperCamelCase__ )
else:
dataset.to_json(UpperCamelCase__ )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
_a : Optional[int] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
_a : Dict = STModelArguments(model_name_or_path=UpperCamelCase__ )
_a : Union[str, Any] = STDataArguments(train_file=UpperCamelCase__ , infer_file=UpperCamelCase__ )
_a : Any = STTrainingArguments(output_dir=UpperCamelCase__ )
_a : Any = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(UpperCamelCase__ ).items():
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for key, value in kwargs.items():
if hasattr(UpperCamelCase__ , UpperCamelCase__ ):
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Sanity checks
_a : Union[str, Any] = {}
_a : Tuple = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
_a : int = args.train_file
_a : List[Any] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
_a : Union[str, Any] = args.eval_file
for key in data_files:
_a : Optional[Any] = data_files[key].split(""".""" )[-1]
assert extension in ["csv", "json"], F"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
_a : str = extension
else:
            assert extension == args.data_file_extension, F"""`{key}_file` should be a {args.data_file_extension} file."""
assert (
args.eval_metric in datasets.list_metrics()
), F"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("""Creating the initial data directory for self-training...""" )
_a : Tuple = F"""{args.output_dir}/self-train_iter-{{}}""".format
_a : Dict = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=UpperCamelCase__ )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
accelerator.wait_for_everyone()
_a : str = None
_a : int = None
_a : str = 0
_a : List[Any] = False
# Show the progress bar
_a : List[Any] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_a : Union[str, Any] = data_dir_format(UpperCamelCase__ )
assert os.path.exists(UpperCamelCase__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_a : str = os.path.join(UpperCamelCase__ , """stage-1""" )
_a : Tuple = {
"""accelerator""": accelerator,
"""model_name_or_path""": args.model_name_or_path,
"""cache_dir""": args.cache_dir,
"""do_train""": True,
"""train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
"""do_eval""": True if args.eval_file is not None else False,
"""eval_file""": data_files["""eval"""],
"""do_predict""": True,
"""infer_file""": data_files["""infer"""],
"""task_name""": args.task_name,
"""label_list""": args.label_list,
"""output_dir""": current_output_dir,
"""eval_metric""": args.eval_metric,
"""evaluation_strategy""": args.evaluation_strategy,
"""early_stopping_patience""": args.early_stopping_patience,
"""early_stopping_threshold""": args.early_stopping_threshold,
"""seed""": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
arguments_dict.update({key: value} )
_a : int = os.path.join(UpperCamelCase__ , """best-checkpoint""" , UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , UpperCamelCase__ , UpperCamelCase__ , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , UpperCamelCase__ )
finetune(**UpperCamelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(UpperCamelCase__ )
logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , UpperCamelCase__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_a : Dict = os.path.join(UpperCamelCase__ , """best-checkpoint""" )
_a : List[str] = os.path.join(UpperCamelCase__ , """stage-2""" )
# Update arguments_dict
_a : int = model_path
_a : Dict = data_files["""train"""]
_a : int = current_output_dir
_a : Any = os.path.join(UpperCamelCase__ , """best-checkpoint""" , UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""" , UpperCamelCase__ , UpperCamelCase__ , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , UpperCamelCase__ )
finetune(**UpperCamelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(UpperCamelCase__ )
logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , UpperCamelCase__ )
_a : List[Any] = iteration
_a : int = data_dir_format(iteration + 1 )
_a : Dict = AutoConfig.from_pretrained(os.path.join(UpperCamelCase__ , """best-checkpoint""" ) )
_a : Union[str, Any] = config.idalabel
_a : Any = os.path.join(UpperCamelCase__ , """eval_results_best-checkpoint.json""" )
_a : Any = os.path.join(UpperCamelCase__ , """test_results_best-checkpoint.json""" )
assert os.path.exists(UpperCamelCase__ )
with open(UpperCamelCase__ , """r""" ) as f:
_a : Tuple = float(json.load(UpperCamelCase__ )[args.eval_metric] )
_a : Dict = os.path.join(UpperCamelCase__ , """infer_output_best-checkpoint.csv""" )
assert os.path.exists(UpperCamelCase__ )
# Loading the dataset from local csv or json files.
_a : List[Any] = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""]
_a : Any = load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""]
if accelerator.is_main_process:
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
shutil.copy(UpperCamelCase__ , os.path.join(UpperCamelCase__ , F"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(UpperCamelCase__ ):
shutil.copy(UpperCamelCase__ , os.path.join(UpperCamelCase__ , F"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
accelerator.wait_for_everyone()
_a : List[str] = os.path.join(UpperCamelCase__ , F"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_a : Any = eval_result
if best_iteration is None:
_a : Union[str, Any] = new_iteration
_a : str = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_a : Union[str, Any] = new_iteration
_a : List[str] = new_eval_result
_a : Optional[Any] = 0
else:
if new_eval_result == best_eval_result:
_a : Tuple = new_iteration
_a : List[Any] = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_a : Union[str, Any] = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("""Best iteration: %d""" , UpperCamelCase__ )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , UpperCamelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(UpperCamelCase__ , F"""eval_results_iter-{iteration}.json""" ) , os.path.join(UpperCamelCase__ , """eval_results_best-iteration.json""" ) , )
else:
# Assume that the last iteration is the best
logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , UpperCamelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(UpperCamelCase__ , F"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(UpperCamelCase__ , """eval_results_best-iteration.json""" ) , )
| 324
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
_snake_case = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
_snake_case = {
'google/fnet-base': 512,
'google/fnet-large': 512,
}
_snake_case = '▁'
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : Optional[Any] = VOCAB_FILES_NAMES
UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : str = ['''input_ids''', '''token_type_ids''']
UpperCamelCase : List[str] = FNetTokenizer
def __init__( self : Dict , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : int=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : List[Any]="<unk>" , UpperCAmelCase__ : Optional[Any]="[SEP]" , UpperCAmelCase__ : Optional[Any]="<pad>" , UpperCAmelCase__ : List[Any]="[CLS]" , UpperCAmelCase__ : List[Any]="[MASK]" , **UpperCAmelCase__ : Union[str, Any] , ) -> Optional[Any]:
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
_a : List[Any] = (
AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ , normalized=UpperCAmelCase__ )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
else mask_token
)
super().__init__(
UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , **UpperCAmelCase__ , )
_a : List[str] = do_lower_case
_a : List[Any] = remove_space
_a : List[str] = keep_accents
_a : List[Any] = vocab_file
_a : Optional[Any] = False if not self.vocab_file else True
def _lowercase ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
_a : int = [self.sep_token_id]
_a : Any = [self.cls_token_id]
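        # single sequence: ``[CLS] X [SEP]``; pair of sequences: ``[CLS] A [SEP] B [SEP]``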
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowercase ( self : Tuple , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
_a : Any = [self.sep_token_id]
_a : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : Any = os.path.join(
UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ):
copyfile(self.vocab_file , UpperCAmelCase__ )
return (out_vocab_file,)
| 352
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
_snake_case = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
_snake_case = {
'camembert-base': 512,
}
_snake_case = '▁'
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : Any = VOCAB_FILES_NAMES
UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : Dict = ['''input_ids''', '''attention_mask''']
UpperCamelCase : Optional[Any] = CamembertTokenizer
def __init__( self : int , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Optional[Any]="<s>" , UpperCAmelCase__ : Optional[int]="</s>" , UpperCAmelCase__ : Tuple="</s>" , UpperCAmelCase__ : Tuple="<s>" , UpperCAmelCase__ : Tuple="<unk>" , UpperCAmelCase__ : Tuple="<pad>" , UpperCAmelCase__ : int="<mask>" , UpperCAmelCase__ : Optional[int]=["<s>NOTUSED", "</s>NOTUSED"] , **UpperCAmelCase__ : Optional[Any] , ) -> Union[str, Any]:
        # The mask token behaves like a normal word, i.e. it includes the space before it
_a : List[Any] = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token
super().__init__(
UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , )
_a : int = vocab_file
_a : int = False if not self.vocab_file else True
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : List[Any] = [self.cls_token_id]
_a : Dict = [self.sep_token_id]
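        # single sequence: ``<s> X </s>``; pair of sequences (RoBERTa-style): ``<s> A </s></s> B </s>``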
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
_a : Union[str, Any] = [self.sep_token_id]
_a : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : List[str] = os.path.join(
UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ):
copyfile(self.vocab_file , UpperCAmelCase__ )
return (out_vocab_file,)
| 324
| 0
|
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Optional[Any] = checkpoint
_a : List[Any] = {}
_a : Tuple = vae_state_dict["""encoder.conv_in.weight"""]
_a : str = vae_state_dict["""encoder.conv_in.bias"""]
_a : Tuple = vae_state_dict["""encoder.conv_out.weight"""]
_a : Dict = vae_state_dict["""encoder.conv_out.bias"""]
_a : List[Any] = vae_state_dict["""encoder.norm_out.weight"""]
_a : Dict = vae_state_dict["""encoder.norm_out.bias"""]
_a : Tuple = vae_state_dict["""decoder.conv_in.weight"""]
_a : Union[str, Any] = vae_state_dict["""decoder.conv_in.bias"""]
_a : Any = vae_state_dict["""decoder.conv_out.weight"""]
_a : Optional[Any] = vae_state_dict["""decoder.conv_out.bias"""]
_a : Any = vae_state_dict["""decoder.norm_out.weight"""]
_a : str = vae_state_dict["""decoder.norm_out.bias"""]
_a : Any = vae_state_dict["""quant_conv.weight"""]
_a : Optional[Any] = vae_state_dict["""quant_conv.bias"""]
_a : Union[str, Any] = vae_state_dict["""post_quant_conv.weight"""]
_a : Dict = vae_state_dict["""post_quant_conv.bias"""]
# Retrieves the keys for the encoder down blocks only
_a : int = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} )
_a : Tuple = {
layer_id: [key for key in vae_state_dict if F"""down.{layer_id}""" in key] for layer_id in range(UpperCamelCase__ )
}
# Retrieves the keys for the decoder up blocks only
_a : Any = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} )
_a : Optional[Any] = {
layer_id: [key for key in vae_state_dict if F"""up.{layer_id}""" in key] for layer_id in range(UpperCamelCase__ )
}
for i in range(UpperCamelCase__ ):
_a : str = [key for key in down_blocks[i] if F"""down.{i}""" in key and F"""down.{i}.downsample""" not in key]
if F"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
_a : Any = vae_state_dict.pop(
F"""encoder.down.{i}.downsample.conv.weight""" )
_a : Union[str, Any] = vae_state_dict.pop(
F"""encoder.down.{i}.downsample.conv.bias""" )
_a : Any = renew_vae_resnet_paths(UpperCamelCase__ )
_a : Any = {"""old""": F"""down.{i}.block""", """new""": F"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , config=UpperCamelCase__ )
_a : Dict = [key for key in vae_state_dict if """encoder.mid.block""" in key]
_a : Optional[int] = 2
for i in range(1 , num_mid_res_blocks + 1 ):
_a : str = [key for key in mid_resnets if F"""encoder.mid.block_{i}""" in key]
_a : Tuple = renew_vae_resnet_paths(UpperCamelCase__ )
_a : Any = {"""old""": F"""mid.block_{i}""", """new""": F"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , config=UpperCamelCase__ )
_a : Optional[int] = [key for key in vae_state_dict if """encoder.mid.attn""" in key]
_a : str = renew_vae_attention_paths(UpperCamelCase__ )
_a : List[Any] = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , config=UpperCamelCase__ )
conv_attn_to_linear(UpperCamelCase__ )
for i in range(UpperCamelCase__ ):
_a : Union[str, Any] = num_up_blocks - 1 - i
_a : Union[str, Any] = [
key for key in up_blocks[block_id] if F"""up.{block_id}""" in key and F"""up.{block_id}.upsample""" not in key
]
if F"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
_a : Union[str, Any] = vae_state_dict[
F"""decoder.up.{block_id}.upsample.conv.weight"""
]
_a : Optional[int] = vae_state_dict[
F"""decoder.up.{block_id}.upsample.conv.bias"""
]
_a : int = renew_vae_resnet_paths(UpperCamelCase__ )
_a : int = {"""old""": F"""up.{block_id}.block""", """new""": F"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , config=UpperCamelCase__ )
_a : int = [key for key in vae_state_dict if """decoder.mid.block""" in key]
_a : List[Any] = 2
for i in range(1 , num_mid_res_blocks + 1 ):
_a : int = [key for key in mid_resnets if F"""decoder.mid.block_{i}""" in key]
_a : Dict = renew_vae_resnet_paths(UpperCamelCase__ )
_a : List[str] = {"""old""": F"""mid.block_{i}""", """new""": F"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , config=UpperCamelCase__ )
_a : Any = [key for key in vae_state_dict if """decoder.mid.attn""" in key]
_a : str = renew_vae_attention_paths(UpperCamelCase__ )
_a : str = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , config=UpperCamelCase__ )
conv_attn_to_linear(UpperCamelCase__ )
return new_checkpoint
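# Illustrative sketch (editor's addition, not part of the original script): the
# function above maps LDM VAE keys such as "encoder.down.0.block.1.norm1.weight"
# onto diffusers' "encoder.down_blocks.0.resnets.1.norm1.weight" layout (up
# blocks are additionally reversed via num_up_blocks - 1 - i). The hypothetical
# helper below demonstrates the down/mid renames on a single key.
import re
def _toy_rename_vae_key(key):
    key = re.sub(r"down\.(\d+)\.block", r"down_blocks.\1.resnets", key)
    return key.replace("mid.attn_1", "mid_block.attentions.0")
assert _toy_rename_vae_key("encoder.down.0.block.1.norm1.weight") == "encoder.down_blocks.0.resnets.1.norm1.weight"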
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , ):
'''simple docstring'''
_a : List[Any] = requests.get(
"""https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" )
_a : Dict = io.BytesIO(r.content )
_a : Dict = OmegaConf.load(UpperCamelCase__ )
_a : Union[str, Any] = 5_1_2
_a : Tuple = """cuda""" if torch.cuda.is_available() else """cpu"""
if checkpoint_path.endswith("""safetensors""" ):
from safetensors import safe_open
_a : str = {}
with safe_open(UpperCamelCase__ , framework="""pt""" , device="""cpu""" ) as f:
for key in f.keys():
_a : List[Any] = f.get_tensor(UpperCamelCase__ )
else:
_a : str = torch.load(UpperCamelCase__ , map_location=UpperCamelCase__ )["""state_dict"""]
# Convert the VAE model.
_a : List[Any] = create_vae_diffusers_config(UpperCamelCase__ , image_size=UpperCamelCase__ )
_a : Tuple = custom_convert_ldm_vae_checkpoint(UpperCamelCase__ , UpperCamelCase__ )
_a : Optional[Any] = AutoencoderKL(**UpperCamelCase__ )
vae.load_state_dict(UpperCamelCase__ )
vae.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to store the converted VAE checkpoint.')
_snake_case = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
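# Example invocation (editor's addition; script name and paths are hypothetical):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.ckpt --dump_path ./vae_diffusers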
| 353
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case = logging.get_logger(__name__)
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : Dict = ['''pixel_values''']
def __init__( self : Any , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : Optional[Any]=PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : List[str] , ) -> None:
_a : int = do_resize
_a : Union[str, Any] = do_rescale
_a : Any = size_divisor
_a : Any = resample
super().__init__(**UpperCAmelCase__ )
def _lowercase ( self : Tuple , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[ChannelDimension] = None , **UpperCAmelCase__ : Optional[Any] ) -> np.ndarray:
_a , _a : Tuple = get_image_size(UpperCAmelCase__ )
# Rounds the height and width down to the closest multiple of size_divisor
_a : Optional[Any] = height // size_divisor * size_divisor
_a : Union[str, Any] = width // size_divisor * size_divisor
_a : Any = resize(UpperCAmelCase__ , (new_h, new_w) , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
return image
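# Worked example (editor's addition): with size_divisor=32, an input of height
# 250 and width 333 is resized to 224 x 320: each side is floored to the
# nearest multiple of 32, e.g. (250 // 32) * 32 == 224.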
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : float , UpperCAmelCase__ : Optional[ChannelDimension] = None , **UpperCAmelCase__ : Optional[int] ) -> np.ndarray:
return rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[Union[TensorType, str]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : int , ) -> BatchFeature:
_a : Dict = do_resize if do_resize is not None else self.do_resize
_a : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
_a : str = size_divisor if size_divisor is not None else self.size_divisor
_a : Any = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("""size_divisor is required for resizing""" )
_a : List[str] = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError("""Invalid image(s)""" )
# All transformations expect numpy arrays.
_a : Tuple = [to_numpy_array(UpperCAmelCase__ ) for img in images]
if do_resize:
_a : Optional[int] = [self.resize(UpperCAmelCase__ , size_divisor=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_rescale:
_a : str = [self.rescale(UpperCAmelCase__ , scale=1 / 255 ) for image in images]
_a : Any = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
_a : Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
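# Usage sketch (editor's addition; the keyword names assume the de-obfuscated
# GLPN-style signature do_resize/size_divisor/resample/do_rescale, which this
# obfuscated class mirrors):
# import numpy as np
# processor = UpperCamelCase(do_resize=True, size_divisor=32, do_rescale=True)
# batch = processor(images=[np.zeros((250, 333, 3), dtype=np.uint8)], return_tensors="np")
# batch["pixel_values"].shape  # (1, 3, 224, 320)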
| 324
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCamelCase ( snake_case_ ):
def __init__( self : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] ) -> List[str]:
super().__init__()
# make sure scheduler can always be converted to DDIM
_a : List[Any] = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
@torch.no_grad()
def __call__( self : int , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , UpperCAmelCase__ ):
_a : Optional[int] = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
_a : Optional[int] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(UpperCAmelCase__ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
_a : Any = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(UpperCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_a : List[str] = self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in the DDIM paper and should be in [0, 1]
# do x_t -> x_t-1
_a : List[str] = self.scheduler.step(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , eta=UpperCAmelCase__ , use_clipped_model_output=UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample
_a : str = (image / 2 + 0.5).clamp(0 , 1 )
_a : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_a : Optional[Any] = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase__ )
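# Usage sketch (editor's addition; keyword names assume the standard diffusers
# DDIMPipeline signature, which this obfuscated class mirrors):
# pipe = UpperCamelCase(unet=unet, scheduler=scheduler)
# images = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images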
| 354
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCamelCase ( unittest.TestCase ):
@property
def _lowercase ( self : Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
_a : List[str] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def _lowercase ( self : Dict ) -> Dict:
_a : str = self.dummy_uncond_unet
_a : Optional[int] = KarrasVeScheduler()
_a : List[str] = KarrasVePipeline(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : int = torch.manual_seed(0 )
_a : List[Any] = pipe(num_inference_steps=2 , generator=UpperCAmelCase__ , output_type="""numpy""" ).images
_a : Tuple = torch.manual_seed(0 )
_a : int = pipe(num_inference_steps=2 , generator=UpperCAmelCase__ , output_type="""numpy""" , return_dict=UpperCAmelCase__ )[0]
_a : int = image[0, -3:, -3:, -1]
_a : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a : str = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : Tuple ) -> List[str]:
_a : Optional[Any] = """google/ncsnpp-celebahq-256"""
_a : Any = UNetaDModel.from_pretrained(UpperCAmelCase__ )
_a : Dict = KarrasVeScheduler()
_a : int = KarrasVePipeline(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : Optional[int] = torch.manual_seed(0 )
_a : Tuple = pipe(num_inference_steps=20 , generator=UpperCAmelCase__ , output_type="""numpy""" ).images
_a : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_a : Optional[int] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 324
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
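# Editor's note: with this lazy-module pattern, importing the package stays
# cheap; e.g. `from transformers.models.altclip import AltCLIPModel` only
# resolves the torch-backed submodule at first attribute access (the module
# path assumes the standard transformers layout).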
| 355
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
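# For example (editor's addition; the script name follows the accelerate
# by_feature examples and is an assumption):
#   accelerate launch multi_process_metrics.py                        # current accelerate config
#   accelerate launch multi_process_metrics.py --mixed_precision fp16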
_snake_case = 16
_snake_case = 32
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ = 1_6 ):
'''simple docstring'''
_a : str = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_a : Dict = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(UpperCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
_a : Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_a : Tuple = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels', which is the column name the
# models of the transformers library expect for the labels
_a : List[Any] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(UpperCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_a : Union[str, Any] = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we pad sequence lengths to round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_a : int = 1_6
elif accelerator.mixed_precision != "no":
_a : int = 8
else:
_a : str = None
return tokenizer.pad(
UpperCamelCase__ , padding="""longest""" , max_length=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
_a : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
_a : List[str] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
return train_dataloader, eval_dataloader
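# Padding sketch (editor's addition): with padding="longest" and
# pad_to_multiple_of=8, a batch whose longest sequence has 45 tokens is padded
# up to 48, which keeps tensor shapes friendly to fp16/bf16 tensor cores.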
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_snake_case = mocked_dataloaders # noqa: F811
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , UpperCamelCase__ ) == "1":
_a : str = 2
# Initialize accelerator
_a : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_a : Any = config["""lr"""]
_a : Union[str, Any] = int(config["""num_epochs"""] )
_a : str = int(config["""seed"""] )
_a : List[Any] = int(config["""batch_size"""] )
_a : Tuple = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
_a : Optional[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_a : Optional[Any] = batch_size // MAX_GPU_BATCH_SIZE
_a : str = MAX_GPU_BATCH_SIZE
set_seed(UpperCamelCase__ )
_a , _a : Optional[int] = get_dataloaders(UpperCamelCase__ , UpperCamelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_a : int = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=UpperCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_a : List[str] = model.to(accelerator.device )
# Instantiate optimizer
_a : List[str] = AdamW(params=model.parameters() , lr=UpperCamelCase__ )
# Instantiate scheduler
_a : List[str] = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(UpperCamelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_a , _a , _a , _a , _a : Optional[Any] = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Now we train the model
for epoch in range(UpperCamelCase__ ):
model.train()
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_a : Optional[Any] = model(**UpperCamelCase__ )
_a : str = outputs.loss
_a : Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(UpperCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
_a : Union[str, Any] = 0
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_a : Dict = model(**UpperCamelCase__ )
_a : Optional[Any] = outputs.logits.argmax(dim=-1 )
_a , _a : int = accelerator.gather((predictions, batch["""labels"""]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(UpperCamelCase__ ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
_a : str = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_a : int = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=UpperCamelCase__ , references=UpperCamelCase__ , )
_a : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , UpperCamelCase__ )
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : Tuple = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=UpperCamelCase__ , default=UpperCamelCase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
_a : Optional[Any] = parser.parse_args()
_a : Tuple = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 324
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 356
|
"""simple docstring"""
import numpy as np
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
return vector * sigmoid(1.702 * vector )
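# Editor's note: x * sigmoid(1.702 * x) is the sigmoid approximation of GELU,
# x * Phi(x). Quick check: at x = 0 it returns 0 (since sigmoid(0) == 0.5), and
# at x = 1 it gives 1 * sigmoid(1.702) ≈ 0.846 versus the exact GELU ≈ 0.841.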
if __name__ == "__main__":
import doctest
doctest.testmod()
| 324
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
_snake_case = False
class UpperCamelCase ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : int ) -> List[Any]:
_a : Optional[int] = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_a : Tuple = torch.manual_seed(0 )
_a : Union[str, Any] = pipe(
image=UpperCAmelCase__ , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
_a : int = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_a : int = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 357
|
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir('fixtures/test_sentencepiece.model')
_snake_case = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
_snake_case = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( snake_case_ , unittest.TestCase ):
UpperCamelCase : str = CamembertTokenizer
UpperCamelCase : List[Any] = CamembertTokenizerFast
UpperCamelCase : Optional[int] = True
UpperCamelCase : Union[str, Any] = True
def _lowercase ( self : List[Any] ) -> Union[str, Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_a : List[Any] = CamembertTokenizer(UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self : List[str] ) -> Tuple:
_a : Optional[Any] = """<pad>"""
_a : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ) -> str:
_a : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(UpperCAmelCase__ ) , 1004 )
def _lowercase ( self : List[str] ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def _lowercase ( self : Union[str, Any] ) -> str:
_a : Tuple = CamembertTokenizer(UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
_a : List[Any] = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
_a : Any = """I was born in 92000, and this is falsé."""
_a : Union[str, Any] = tokenizer.encode(UpperCAmelCase__ )
_a : Dict = rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Tuple = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
_a : List[Any] = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# <unk> tokens are not the same for `rust` as for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
_a : List[str] = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
_a : int = rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def _lowercase ( self : Dict ) -> List[str]:
if not self.test_rust_tokenizer:
return
_a : Optional[int] = self.get_tokenizer()
_a : Tuple = self.get_rust_tokenizer()
_a : List[Any] = """I was born in 92000, and this is falsé."""
_a : List[str] = tokenizer.tokenize(UpperCAmelCase__ )
_a : Union[str, Any] = rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
_a : int = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
_a : Optional[int] = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
_a : int = self.get_rust_tokenizer()
_a : Optional[Any] = tokenizer.encode(UpperCAmelCase__ )
_a : Dict = rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def _lowercase ( self : Tuple ) -> List[Any]:
# fmt: off
_a : Dict = {"""input_ids""": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
_a : Union[str, Any] = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=UpperCAmelCase__ , )
| 324
| 0
|
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Optional[int] = 0
_a : List[Any] = len(UpperCamelCase__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
_a : Any = i + 1
else:
_a : List[str] = j - 1
return []
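# Editor's note: the two-pointer scan assumes `nums` is sorted in non-decreasing
# order and runs in O(n) time with O(1) extra space; e.g. for nums=[2, 7, 11, 15]
# and target=9 it returns [0, 1] because nums[0] + nums[1] == 9.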
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{two_pointer([2, 7, 11, 15], 9) = }''')
| 358
|
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_snake_case = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
_snake_case = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_snake_case = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_snake_case = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_snake_case = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_snake_case = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
_a : Dict = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , UpperCamelCase__ )
return [m.group(0 ) for m in matches]
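# Editor's sketch: the regex splits identifiers at CamelCase boundaries, e.g.
# camel_case_split("TFConvBertModel") -> ["TF", "Conv", "Bert", "Model"].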
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : Tuple = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_a : Optional[int] = {
config.replace("""Config""" , """""" ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
_a : List[Any] = collections.defaultdict(UpperCamelCase__ )
_a : List[str] = collections.defaultdict(UpperCamelCase__ )
_a : Tuple = collections.defaultdict(UpperCamelCase__ )
# Let's look through all transformers objects (once) and find whether each model is supported by a given backend.
for attr_name in dir(UpperCamelCase__ ):
_a : str = None
if _re_tf_models.match(UpperCamelCase__ ) is not None:
_a : List[Any] = tf_models
_a : int = _re_tf_models.match(UpperCamelCase__ ).groups()[0]
elif _re_flax_models.match(UpperCamelCase__ ) is not None:
_a : Any = flax_models
_a : Any = _re_flax_models.match(UpperCamelCase__ ).groups()[0]
elif _re_pt_models.match(UpperCamelCase__ ) is not None:
_a : int = pt_models
_a : int = _re_pt_models.match(UpperCamelCase__ ).groups()[0]
if lookup_dict is not None:
while len(UpperCamelCase__ ) > 0:
if attr_name in model_prefix_to_model_type:
_a : Optional[int] = True
break
# Try again after removing the last word in the name
_a : List[Any] = """""".join(camel_case_split(UpperCamelCase__ )[:-1] )
_a : Optional[int] = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
_a : Dict = list(UpperCamelCase__ )
all_models.sort()
_a : str = {"""model_type""": all_models}
_a : List[Any] = [pt_models[t] for t in all_models]
_a : str = [tf_models[t] for t in all_models]
_a : Optional[int] = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure each model type is paired with its preprocessing class
_a : str = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
_a : List[str] = """AutoProcessor"""
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
_a : str = """AutoTokenizer"""
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
_a : int = """AutoFeatureExtractor"""
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
_a : int = """AutoTokenizer"""
_a : Any = [processors[t] for t in all_models]
return pd.DataFrame(UpperCamelCase__ )
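# Editor's sketch: the resulting frame has one row per model type, with boolean
# backend columns plus a processor column (the column names assume the original
# un-obfuscated dict keys), e.g.:
#   model_type  pytorch  tensorflow  flax  processor
#   bert        True     True        True  AutoTokenizer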
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
_a : List[Any] = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
_a : List[Any] = [model_mapping, F"""TF_{model_mapping}""", F"""FLAX_{model_mapping}"""]
_a : Union[str, Any] = [auto_class, F"""TF_{auto_class}""", F"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
# The type of pipeline may not exist in this framework
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
continue
# First extract all model_names
_a : str = []
for name in getattr(UpperCamelCase__ , UpperCamelCase__ ).values():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
model_names.append(UpperCamelCase__ )
else:
model_names.extend(list(UpperCamelCase__ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Dict = get_frameworks_table()
_a : Optional[Any] = Dataset.from_pandas(UpperCamelCase__ )
_a : Any = hf_hub_download(
"""huggingface/transformers-metadata""" , """pipeline_tags.json""" , repo_type="""dataset""" , token=UpperCamelCase__ )
_a : List[Any] = Dataset.from_json(UpperCamelCase__ )
_a : List[str] = {
tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""])
for i in range(len(UpperCamelCase__ ) )
}
_a : str = update_pipeline_and_auto_class_table(UpperCamelCase__ )
# Sort the model classes to avoid nondeterministic updates that would create false update commits.
_a : int = sorted(table.keys() )
_a : Union[str, Any] = pd.DataFrame(
{
"""model_class""": model_classes,
"""pipeline_tag""": [table[m][0] for m in model_classes],
"""auto_class""": [table[m][1] for m in model_classes],
} )
_a : Dict = Dataset.from_pandas(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(UpperCamelCase__ , """frameworks.json""" ) )
tags_dataset.to_json(os.path.join(UpperCamelCase__ , """pipeline_tags.json""" ) )
if commit_sha is not None:
_a : List[str] = (
F"""Update with commit {commit_sha}\n\nSee: """
F"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
_a : Optional[Any] = """Update"""
upload_folder(
repo_id="""huggingface/transformers-metadata""" , folder_path=UpperCamelCase__ , repo_type="""dataset""" , token=UpperCamelCase__ , commit_message=UpperCamelCase__ , )
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : List[str] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
_a : Any = transformers_module.pipelines.SUPPORTED_TASKS
_a : List[str] = []
for key in pipeline_tasks:
if key not in in_table:
_a : Tuple = pipeline_tasks[key]["""pt"""]
if isinstance(UpperCamelCase__ , (list, tuple) ):
_a : Dict = model[0]
_a : List[str] = model.__name__
if model not in in_table.values():
missing.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
_a : Union[str, Any] = """, """.join(UpperCamelCase__ )
raise ValueError(
"""The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """
F"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
_snake_case = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 324
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__=False ):
'''simple docstring'''
_a : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_a : Tuple = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
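# Editor's note: for each layer i this yields (timm_key, hf_key) pairs such as
# ("blocks.0.norm1.weight", "vit.encoder.layer.0.layernorm_before.weight");
# rename_key() below pops the timm key and re-inserts the tensor under the HF key.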
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_a : Tuple = """"""
else:
_a : int = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_a : int = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
_a : Dict = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : List[str] = in_proj_weight[
: config.hidden_size, :
]
_a : List[Any] = in_proj_bias[: config.hidden_size]
_a : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_a : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_a : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
_a : List[str] = in_proj_bias[-config.hidden_size :]
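# Editor's note: timm fuses query, key and value into one qkv projection of
# shape (3 * hidden_size, hidden_size); the slicing above carves it into three
# (hidden_size, hidden_size) blocks in query/key/value order.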
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
_a : List[str] = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Tuple = dct.pop(UpperCamelCase__ )
_a : Union[str, Any] = val
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_a : List[Any] = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Union[str, Any] = ViTConfig()
_a : Any = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
_a : Tuple = True
_a : List[str] = int(vit_name[-1_2:-1_0] )
_a : Dict = int(vit_name[-9:-6] )
else:
_a : Optional[Any] = 1_0_0_0
_a : Any = """huggingface/label-files"""
_a : List[str] = """imagenet-1k-id2label.json"""
_a : Optional[Any] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
_a : Tuple = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_a : List[Any] = idalabel
_a : Union[str, Any] = {v: k for k, v in idalabel.items()}
_a : int = int(vit_name[-6:-4] )
_a : Optional[Any] = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("""tiny""" ):
_a : Any = 1_9_2
_a : str = 7_6_8
_a : List[str] = 1_2
_a : Tuple = 3
elif vit_name[9:].startswith("""small""" ):
_a : int = 3_8_4
_a : Union[str, Any] = 1_5_3_6
_a : Optional[Any] = 1_2
_a : List[str] = 6
else:
pass
else:
if vit_name[4:].startswith("""small""" ):
_a : Tuple = 7_6_8
_a : str = 2_3_0_4
_a : Tuple = 8
_a : Optional[int] = 8
elif vit_name[4:].startswith("""base""" ):
pass
elif vit_name[4:].startswith("""large""" ):
_a : int = 1_0_2_4
_a : List[str] = 4_0_9_6
_a : int = 2_4
_a : Dict = 1_6
elif vit_name[4:].startswith("""huge""" ):
_a : int = 1_2_8_0
_a : Any = 5_1_2_0
_a : Any = 3_2
_a : Optional[int] = 1_6
# load original model from timm
_a : Tuple = timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_a : Dict = timm_model.state_dict()
if base_model:
remove_classification_head_(UpperCamelCase__ )
_a : Optional[int] = create_rename_keys(UpperCamelCase__ , UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# load HuggingFace model
if vit_name[-5:] == "in21k":
_a : List[str] = ViTModel(UpperCamelCase__ ).eval()
else:
_a : Union[str, Any] = ViTForImageClassification(UpperCamelCase__ ).eval()
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
_a : List[str] = DeiTImageProcessor(size=config.image_size )
else:
_a : Optional[Any] = ViTImageProcessor(size=config.image_size )
_a : List[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" )
_a : Any = encoding["""pixel_values"""]
_a : Tuple = model(UpperCamelCase__ )
if base_model:
_a : Optional[int] = timm_model.forward_features(UpperCamelCase__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(UpperCamelCase__ , outputs.pooler_output , atol=1e-3 )
else:
_a : int = timm_model(UpperCamelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCamelCase__ , outputs.logits , atol=1e-3 )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_snake_case = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
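# Example invocation (editor's addition; script name and output dir are assumptions):
#   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 --pytorch_dump_folder_path ./vit-base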
| 359
|
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Dict = tmp_path_factory.mktemp("""dset_infos_dir""" )
if "full:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""---\ndataset_info:\n dataset_size: 42\n---""" )
if "empty:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""""" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f:
f.write("""{\"default\": {\"dataset_size\": 42}}""" )
_a : Dict = DatasetInfosDict.from_directory(UpperCamelCase__ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 4_2
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , ),
] , )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Optional[int] = str(UpperCamelCase__ )
dataset_info.write_to_directory(UpperCamelCase__ )
_a : Any = DatasetInfo.from_directory(UpperCamelCase__ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(UpperCamelCase__ , """dataset_info.json""" ) )
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : Dict = DatasetInfo(
description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 4_2}] , download_checksums={} , download_size=1_3_3_7 , post_processing_size=4_4_2 , dataset_size=1_2_3_4 , size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4 , )
_a : int = dataset_info._to_yaml_dict()
assert sorted(UpperCamelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
_a : List[str] = yaml.safe_dump(UpperCamelCase__ )
_a : Optional[int] = yaml.safe_load(UpperCamelCase__ )
assert dataset_info_yaml_dict == reloaded
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : List[Any] = DatasetInfo()
_a : Any = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=4_2 ),
"""v2""": DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : List[Any] = str(UpperCamelCase__ )
dataset_infos_dict.write_to_directory(UpperCamelCase__ )
_a : List[Any] = DatasetInfosDict.from_directory(UpperCamelCase__ )
# the config_name keys of the dataset_infos_dict take precedence over the config_name attribute
for config_name, dataset_info in dataset_infos_dict.items():
_a : str = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
_a : Dict = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(UpperCamelCase__ , """README.md""" ) )
| 324
| 0
|
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Dict = tmp_path_factory.mktemp("""dset_infos_dir""" )
if "full:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""---\ndataset_info:\n dataset_size: 42\n---""" )
if "empty:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""""" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f:
f.write("""{\"default\": {\"dataset_size\": 42}}""" )
_a : Dict = DatasetInfosDict.from_directory(UpperCamelCase__ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 4_2
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , ),
] , )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Optional[int] = str(UpperCamelCase__ )
dataset_info.write_to_directory(UpperCamelCase__ )
_a : Any = DatasetInfo.from_directory(UpperCamelCase__ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(UpperCamelCase__ , """dataset_info.json""" ) )
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : Dict = DatasetInfo(
description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 4_2}] , download_checksums={} , download_size=1_3_3_7 , post_processing_size=4_4_2 , dataset_size=1_2_3_4 , size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4 , )
_a : int = dataset_info._to_yaml_dict()
assert sorted(UpperCamelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
_a : List[str] = yaml.safe_dump(UpperCamelCase__ )
_a : Optional[int] = yaml.safe_load(UpperCamelCase__ )
assert dataset_info_yaml_dict == reloaded
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : List[Any] = DatasetInfo()
_a : Any = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=4_2 ),
"""v2""": DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : List[Any] = str(UpperCamelCase__ )
dataset_infos_dict.write_to_directory(UpperCamelCase__ )
_a : List[Any] = DatasetInfosDict.from_directory(UpperCamelCase__ )
    # the config_name keys of the dataset_infos_dict take precedence over the config_name attribute
for config_name, dataset_info in dataset_infos_dict.items():
_a : str = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
_a : Dict = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(UpperCamelCase__ , """README.md""" ) )
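# A minimal round-trip sketch of the property the tests above rely on (plain
# PyYAML, no datasets objects): dumping a mapping of lists/dicts/ints/strings
# to YAML and loading it back is lossless.
import yaml

payload = {"dataset_size": 42, "splits": [{"name": "train"}]}
assert yaml.safe_load(yaml.safe_dump(payload)) == payload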
| 360
|
"""simple docstring"""
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class UpperCamelCase ( unittest.TestCase , snake_case_ ):
def _lowercase ( self : int ) -> int:
_a : Optional[Any] = load_tool("""text-to-speech""" )
self.tool.setup()
def _lowercase ( self : List[str] ) -> Union[str, Any]:
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
_a : str = self.tool("""hey""" )
_a : List[str] = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
_a : int = self.tool("""hey""" )
_a : str = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
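# A self-contained sketch of why both tests above call torch.manual_seed(0):
# re-seeding makes the stochastic parts of the pipeline reproducible, so the
# same first three samples can be compared against fixed reference values.
import torch

torch.manual_seed(0)
first = torch.randn(3)
torch.manual_seed(0)
second = torch.randn(3)
assert torch.equal(first, second)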
| 324
| 0
|
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
_snake_case = 637_8137.0
_snake_case = 635_6752.31_4245
_snake_case = 637_8137
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Any = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
_a : Optional[int] = atan((1 - flattening) * tan(radians(UpperCamelCase__ ) ) )
_a : Optional[int] = atan((1 - flattening) * tan(radians(UpperCamelCase__ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
_a : Tuple = haversine_distance(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
    _a : Tuple = (b_lata + b_latb) / 2
    _a : List[Any] = (b_lata - b_latb) / 2
# Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma / 2)
_a : Optional[int] = (sin(UpperCamelCase__ ) ** 2) * (cos(UpperCamelCase__ ) ** 2)
_a : Dict = cos(sigma / 2 ) ** 2
    _a : List[Any] = (sigma - sin(UpperCamelCase__ )) * (x_numerator / x_denominator)
# Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma / 2)
_a : List[str] = (cos(UpperCamelCase__ ) ** 2) * (sin(UpperCamelCase__ ) ** 2)
_a : Optional[Any] = sin(sigma / 2 ) ** 2
_a : str = (sigma + sin(UpperCamelCase__ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
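# Sanity check (self-contained sketch): with the WGS-84 semi-axes defined at
# the top of this file, the flattening f = (a - b) / a matches the canonical
# value of roughly 1 / 298.257223563.
wgs84_flattening = (6378137.0 - 6356752.314245) / 6378137.0
assert abs(wgs84_flattening - 1 / 298.257223563) < 1e-9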
| 361
|
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class UpperCamelCase ( snake_case_ ):
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[str]=None , **UpperCAmelCase__ : str ) -> int:
_a : str = parent
_a : Union[str, Any] = config_class
_a : List[Any] = has_text_modality
_a : List[Any] = kwargs
_a : List[Any] = common_properties
def _lowercase ( self : int ) -> Tuple:
_a : List[str] = self.config_class(**self.inputs_dict )
_a : Dict = (
["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["""vocab_size"""] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) , msg=f"""`{prop}` does not exist""" )
        # Test that config has the common properties as setters
for idx, name in enumerate(UpperCAmelCase__ ):
try:
setattr(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
self.parent.assertEqual(
getattr(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ , msg=f"""`{name} value {idx} expected, but was {getattr(UpperCAmelCase__ , UpperCAmelCase__ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(UpperCAmelCase__ ):
try:
_a : Optional[int] = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ , msg=f"""`{name} value {idx} expected, but was {getattr(UpperCAmelCase__ , UpperCAmelCase__ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
_a : Optional[Any] = self.config_class(**self.inputs_dict )
_a : List[str] = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , UpperCAmelCase__ )
def _lowercase ( self : int ) -> List[str]:
_a : Optional[Any] = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Tuple = os.path.join(UpperCAmelCase__ , """config.json""" )
config_first.to_json_file(UpperCAmelCase__ )
_a : List[str] = self.config_class.from_json_file(UpperCAmelCase__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _lowercase ( self : Union[str, Any] ) -> Dict:
_a : Dict = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(UpperCAmelCase__ )
_a : Dict = self.config_class.from_pretrained(UpperCAmelCase__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _lowercase ( self : Dict ) -> Tuple:
_a : List[Any] = self.config_class(**self.inputs_dict )
_a : Any = """test"""
with tempfile.TemporaryDirectory() as tmpdirname:
_a : List[Any] = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
config_first.save_pretrained(UpperCAmelCase__ )
_a : List[Any] = self.config_class.from_pretrained(UpperCAmelCase__ , subfolder=UpperCAmelCase__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _lowercase ( self : List[str] ) -> Union[str, Any]:
_a : Tuple = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
_a : Union[str, Any] = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def _lowercase ( self : Tuple ) -> List[str]:
if self.config_class.is_composition:
return
_a : str = self.config_class()
self.parent.assertIsNotNone(UpperCAmelCase__ )
def _lowercase ( self : List[Any] ) -> Optional[Any]:
_a : Dict = copy.deepcopy(UpperCAmelCase__ )
_a : Any = self.config_class(**UpperCAmelCase__ )
_a : str = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) )
elif getattr(UpperCAmelCase__ , UpperCAmelCase__ ) != value:
wrong_values.append((key, getattr(UpperCAmelCase__ , UpperCAmelCase__ ), value) )
if len(UpperCAmelCase__ ) > 0:
_a : List[Any] = """\n""".join([f"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
raise ValueError(f"""The following keys were not properly set in the config:\n{errors}""" )
def _lowercase ( self : int ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
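# A minimal sketch of the JSON round-trip contract exercised above: a config
# serialized to a JSON string and parsed back must preserve every field.
import json

config_dict = {"hidden_size": 768, "num_attention_heads": 12, "num_hidden_layers": 6}
assert json.loads(json.dumps(config_dict)) == config_dict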
| 324
| 0
|
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_snake_case = logging.getLogger(__name__)
_snake_case = 'Hello world! cécé herlolip'
_snake_case = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : int = BertAbsConfig(
temp_dir=""".""" , finetune_bert=UpperCamelCase__ , large=UpperCamelCase__ , share_emb=UpperCamelCase__ , use_bert_emb=UpperCamelCase__ , encoder="""bert""" , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , )
_a : List[Any] = torch.load(UpperCamelCase__ , lambda UpperCamelCase__ , UpperCamelCase__ : storage )
_a : str = AbsSummarizer(UpperCamelCase__ , torch.device("""cpu""" ) , UpperCamelCase__ )
original.eval()
_a : Any = BertAbsSummarizer(UpperCamelCase__ , torch.device("""cpu""" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
_a : str = BertTokenizer.from_pretrained("""bert-base-uncased""" )
# prepare the model inputs
_a : Optional[Any] = tokenizer.encode("""This is sample éàalj'-.""" )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(UpperCamelCase__ )) )
_a : str = torch.tensor(UpperCamelCase__ ).unsqueeze(0 )
_a : Optional[int] = tokenizer.encode("""This is sample 3 éàalj'-.""" )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(UpperCamelCase__ )) )
_a : Optional[Any] = torch.tensor(UpperCamelCase__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
_a : Any = encoder_input_ids
_a : Dict = decoder_input_ids
_a : Dict = None
_a : int = None
_a : List[Any] = None
_a : List[str] = None
_a : Optional[Any] = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process, we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
_a : List[str] = original(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )[0]
_a : Optional[int] = original.generator(UpperCamelCase__ )
_a : int = new_model(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )[0]
_a : List[str] = new_model.generator(UpperCamelCase__ )
_a : Dict = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(UpperCamelCase__ ) )
_a : List[str] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(UpperCamelCase__ ) )
_a : Optional[Any] = torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 )
if are_identical:
logging.info("""all weights are equal up to 1e-3""" )
else:
raise ValueError("""the weights are different. The new model is likely different from the original one.""" )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_snake_case = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
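# Tolerance sketch: torch.allclose(..., atol=1e-3), as used above, treats any
# element-wise gap of roughly a thousandth or less as equality.
import torch

assert torch.allclose(torch.tensor([1.0000]), torch.tensor([1.0009]), atol=1e-3)
assert not torch.allclose(torch.tensor([0.0]), torch.tensor([0.01]), atol=1e-3)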
| 362
|
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
_snake_case = HUGGINGFACE_HUB_CACHE
_snake_case = 'config.json'
_snake_case = 'diffusion_pytorch_model.bin'
_snake_case = 'diffusion_flax_model.msgpack'
_snake_case = 'model.onnx'
_snake_case = 'diffusion_pytorch_model.safetensors'
_snake_case = 'weights.pb'
_snake_case = 'https://huggingface.co'
_snake_case = default_cache_path
_snake_case = 'diffusers_modules'
_snake_case = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
_snake_case = ['fp16', 'non-ema']
_snake_case = '.self_attn'
| 324
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : int = '''bert'''
def __init__( self : Dict , UpperCAmelCase__ : Optional[Any]=30522 , UpperCAmelCase__ : Optional[int]=768 , UpperCAmelCase__ : Optional[Any]=12 , UpperCAmelCase__ : int=12 , UpperCAmelCase__ : Dict=3072 , UpperCAmelCase__ : Dict="gelu" , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : List[str]=512 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Optional[int]=0.0_2 , UpperCAmelCase__ : Dict=1E-12 , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : Optional[int]="absolute" , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[int]=None , **UpperCAmelCase__ : List[str] , ) -> int:
super().__init__(pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
_a : Optional[Any] = vocab_size
_a : str = hidden_size
_a : Optional[int] = num_hidden_layers
_a : str = num_attention_heads
_a : Optional[int] = hidden_act
_a : Union[str, Any] = intermediate_size
_a : List[Any] = hidden_dropout_prob
_a : int = attention_probs_dropout_prob
_a : Tuple = max_position_embeddings
_a : Dict = type_vocab_size
_a : Dict = initializer_range
_a : List[Any] = layer_norm_eps
_a : Optional[Any] = position_embedding_type
_a : List[str] = use_cache
_a : Optional[Any] = classifier_dropout
class UpperCamelCase ( snake_case_ ):
@property
def _lowercase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_a : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_a : Any = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
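# Shape sketch for the default (non-multiple-choice) branch above: every ONNX
# input shares the same dynamic axes, batch on dim 0 and sequence on dim 1.
from collections import OrderedDict

dynamic_axis = {0: "batch", 1: "sequence"}
onnx_inputs = OrderedDict(
    [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
)
assert list(onnx_inputs) == ["input_ids", "attention_mask", "token_type_ids"]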
| 363
|
"""simple docstring"""
from math import factorial
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if successes > trials:
raise ValueError("""successes must be lower or equal to trials""" )
if trials < 0 or successes < 0:
raise ValueError("""the function is defined for non-negative integers""" )
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError("""the function is defined for non-negative integers""" )
if not 0 < prob < 1:
raise ValueError("""prob has to be in range of 1 - 0""" )
_a : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
_a : Optional[int] = float(factorial(UpperCamelCase__ ) )
coefficient /= factorial(UpperCamelCase__ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('Probability of 2 successes out of 4 trials')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
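# Worked check of the formula above, independent of the function in this file:
# P(X = 2) with n = 4 trials and p = 0.75 is
# C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375.
from math import comb

assert comb(4, 2) * 0.75**2 * 0.25**2 == 0.2109375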
| 324
| 0
|
"""simple docstring"""
from torch import nn
class UpperCamelCase ( nn.Module ):
def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict ) -> int:
super().__init__()
_a : List[str] = class_size
_a : Union[str, Any] = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
_a : Dict = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ )
def _lowercase ( self : List[str] , UpperCAmelCase__ : str ) -> Optional[Any]:
# hidden_state = nn.functional.relu(self.mlp1(hidden_state))
# hidden_state = self.mlp2(hidden_state)
_a : Optional[int] = self.mlp(UpperCAmelCase__ )
return logits
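# Hedged usage sketch of the head above: a single linear layer maps a
# (batch, embed_size) hidden state to (batch, class_size) logits. nn.Linear
# stands in for the class defined in this file.
import torch
from torch import nn

head = nn.Linear(8, 3)  # embed_size=8, class_size=3
logits = head(torch.randn(2, 8))
assert logits.shape == (2, 3)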
| 364
|
"""simple docstring"""
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a , _a : Dict = len(UpperCamelCase__ ), len(grid[0] )
if (
min(UpperCamelCase__ , UpperCamelCase__ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
_a : Any = 0
count += depth_first_search(UpperCamelCase__ , row + 1 , UpperCamelCase__ , UpperCamelCase__ )
count += depth_first_search(UpperCamelCase__ , row - 1 , UpperCamelCase__ , UpperCamelCase__ )
count += depth_first_search(UpperCamelCase__ , UpperCamelCase__ , col + 1 , UpperCamelCase__ )
count += depth_first_search(UpperCamelCase__ , UpperCamelCase__ , col - 1 , UpperCamelCase__ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
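# Hedged usage sketch: on an obstacle-free 2x2 grid there are exactly two
# simple paths from (0, 0) to (1, 1), right-then-down and down-then-right.
# The recursive calls above refer to this function as `depth_first_search`:
# print(depth_first_search([[0, 0], [0, 0]], 0, 0, set()))  # -> 2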
| 324
| 0
|
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
_snake_case = 5_0000
_snake_case = 5000
_snake_case , _snake_case = os.path.split(__file__)
_snake_case = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
for i in range(UpperCamelCase__ ):
_a : Any = dataset[i]
@get_duration
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
for i in range(0 , len(UpperCamelCase__ ) , UpperCamelCase__ ):
_a : Any = dataset[i : i + batch_size]
@get_duration
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
with dataset.formatted_as(type=UpperCamelCase__ ):
for i in range(UpperCamelCase__ ):
_a : List[Any] = dataset[i]
@get_duration
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
with dataset.formatted_as(type=UpperCamelCase__ ):
for i in range(0 , UpperCamelCase__ , UpperCamelCase__ ):
_a : List[str] = dataset[i : i + batch_size]
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : Optional[Any] = {"""num examples""": SPEED_TEST_N_EXAMPLES}
_a : List[str] = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0_0}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0_0_0}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_0}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_0_0_0}),
]
_a : Optional[Any] = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0_0}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0_0_0}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_0}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_0_0_0}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("""generating dataset""" )
_a : str = datasets.Features(
{"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} )
_a : int = generate_example_dataset(
os.path.join(UpperCamelCase__ , """dataset.arrow""" ) , UpperCamelCase__ , num_examples=UpperCamelCase__ , seq_shapes={"""list""": (1_0_0,)} , )
print("""first set of iterations""" )
for func, kwargs in functions:
print(func.__name__ , str(UpperCamelCase__ ) )
_a : Tuple = func(UpperCamelCase__ , **UpperCamelCase__ )
print("""shuffling dataset""" )
_a : str = dataset.shuffle()
print("""Second set of iterations (after shuffling""" )
for func, kwargs in functions_shuffled:
print("""shuffled """ , func.__name__ , str(UpperCamelCase__ ) )
_a : str = func(
UpperCamelCase__ , **UpperCamelCase__ )
with open(UpperCamelCase__ , """wb""" ) as f:
f.write(json.dumps(UpperCamelCase__ ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
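# A hypothetical sketch of the duration-decorator pattern that `get_duration`
# (imported from utils above) is assumed to follow: run the wrapped function
# and return the elapsed wall-clock time instead of its result.
import time


def time_it(func):
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start

    return wrapper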
| 365
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_snake_case = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 324
| 0
|
import os
import sys
_snake_case = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
_snake_case = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def lowerCAmelCase__ ( *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
return AutoConfig.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def lowerCAmelCase__ ( *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModel.__doc__ )
def lowerCAmelCase__ ( *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
return AutoModel.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def lowerCAmelCase__ ( *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def lowerCAmelCase__ ( *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def lowerCAmelCase__ ( *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def lowerCAmelCase__ ( *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
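# A hypothetical stand-in for the docstring-forwarding pattern used above (not
# the transformers implementation of `add_start_docstrings`): prepend the
# source object's docstring onto the thin shortcut function so that help()
# shows the full upstream documentation.
def copy_docstring(source):
    def decorator(func):
        func.__doc__ = (source.__doc__ or "") + (func.__doc__ or "")
        return func

    return decorator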
| 366
|
"""simple docstring"""
from __future__ import annotations
import time
_snake_case = list[tuple[int, int]]
_snake_case = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0s are free paths whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_snake_case = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class UpperCamelCase :
def __init__( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Node | None ) -> List[str]:
_a : int = pos_x
_a : Union[str, Any] = pos_y
_a : Tuple = (pos_y, pos_x)
_a : Tuple = goal_x
_a : int = goal_y
_a : str = parent
class UpperCamelCase :
def __init__( self : List[Any] , UpperCAmelCase__ : tuple[int, int] , UpperCAmelCase__ : tuple[int, int] ) -> List[str]:
_a : List[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , UpperCAmelCase__ )
_a : List[str] = Node(goal[1] , goal[0] , goal[1] , goal[0] , UpperCAmelCase__ )
_a : Optional[int] = [self.start]
_a : Tuple = False
def _lowercase ( self : str ) -> Path | None:
while self.node_queue:
_a : Tuple = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
_a : Dict = True
return self.retrace_path(UpperCAmelCase__ )
_a : Tuple = self.get_successors(UpperCAmelCase__ )
for node in successors:
self.node_queue.append(UpperCAmelCase__ )
if not self.reached:
return [self.start.pos]
return None
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Node ) -> list[Node]:
_a : Optional[Any] = []
for action in delta:
_a : str = parent.pos_x + action[1]
_a : List[Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(UpperCAmelCase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(UpperCAmelCase__ , UpperCAmelCase__ , self.target.pos_y , self.target.pos_x , UpperCAmelCase__ ) )
return successors
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Node | None ) -> Path:
_a : Dict = node
_a : List[str] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_a : Any = current_node.parent
path.reverse()
return path
class UpperCamelCase :
def __init__( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] ) -> Any:
_a : Dict = BreadthFirstSearch(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Optional[int] = BreadthFirstSearch(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Dict = False
def _lowercase ( self : Any ) -> Path | None:
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
_a : List[Any] = self.fwd_bfs.node_queue.pop(0 )
_a : Union[str, Any] = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
_a : Optional[int] = True
return self.retrace_bidirectional_path(
UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[str] = current_bwd_node
_a : int = current_fwd_node
_a : Optional[Any] = {
self.fwd_bfs: self.fwd_bfs.get_successors(UpperCAmelCase__ ),
self.bwd_bfs: self.bwd_bfs.get_successors(UpperCAmelCase__ ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(UpperCAmelCase__ )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Node , UpperCAmelCase__ : Node ) -> Path:
_a : str = self.fwd_bfs.retrace_path(UpperCAmelCase__ )
_a : List[Any] = self.bwd_bfs.retrace_path(UpperCAmelCase__ )
bwd_path.pop()
bwd_path.reverse()
_a : Tuple = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
_snake_case = (0, 0)
_snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_snake_case = time.time()
_snake_case = BreadthFirstSearch(init, goal)
_snake_case = bfs.search()
_snake_case = time.time() - start_bfs_time
print('Unidirectional BFS computation time : ', bfs_time)
_snake_case = time.time()
_snake_case = BidirectionalBreadthFirstSearch(init, goal)
_snake_case = bd_bfs.search()
_snake_case = time.time() - start_bd_bfs_time
print('Bidirectional BFS computation time : ', bd_bfs_time)
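# Why the bidirectional variant tends to win (worked arithmetic): with a
# branching factor of 3 and a solution at depth 12, a single frontier visits
# on the order of 3**12 = 531441 nodes, while two frontiers meeting halfway
# visit on the order of 2 * 3**6 = 1458.
assert 3**12 == 531441 and 2 * 3**6 == 1458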
| 324
| 0
|
"""simple docstring"""
import math
class UpperCamelCase :
def _lowercase ( self : Tuple , UpperCAmelCase__ : list[list[float]] , UpperCAmelCase__ : list[int] ) -> int:
_a : str = 0.0
_a : Optional[int] = 0.0
for i in range(len(UpperCAmelCase__ ) ):
            da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if da > db else 1
return 0
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : list[list[int | float]] , UpperCAmelCase__ : list[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : float ) -> list[list[int | float]]:
for i in range(len(UpperCAmelCase__ ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : Tuple = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
_a : Tuple = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
_a : Union[str, Any] = SelfOrganizingMap()
_a : Optional[Any] = 3
_a : List[Any] = 0.5
for _ in range(UpperCamelCase__ ):
for j in range(len(UpperCamelCase__ ) ):
# training sample
_a : List[str] = training_samples[j]
# Compute the winning vector
_a : Tuple = self_organizing_map.get_winner(UpperCamelCase__ , UpperCamelCase__ )
# Update the winning vector
_a : Any = self_organizing_map.update(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# classify test sample
_a : str = [0, 0, 0, 1]
_a : str = self_organizing_map.get_winner(UpperCamelCase__ , UpperCamelCase__ )
# results
print(F"""Clusters that the test sample belongs to : {winner}""" )
print(F"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
main()
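# Worked check of the update rule used above, w += alpha * (sample - w): with
# alpha = 0.5, a weight of 0.2 pulled toward a sample value of 1 moves halfway
# to 0.6.
assert abs((0.2 + 0.5 * (1 - 0.2)) - 0.6) < 1e-12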
| 367
|
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_snake_case = logging.getLogger(__name__)
_snake_case = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_snake_case = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCamelCase :
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
''' scratch.'''
)
} , )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case_ )} , )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class UpperCamelCase :
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={
'''help''': (
'''The input training data files (multiple files in glob format). '''
'''Very often splitting large files to smaller files can prevent tokenizer going out of memory'''
)
} , )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , )
UpperCamelCase : bool = field(
default=snake_case_ , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , )
UpperCamelCase : bool = field(
default=snake_case_ , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} )
UpperCamelCase : bool = field(default=snake_case_ , metadata={'''help''': '''Whether ot not to use whole word mask.'''} )
UpperCamelCase : float = field(
default=0.1_5 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
UpperCamelCase : float = field(
default=1 / 6 , metadata={
'''help''': (
'''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
''' modeling.'''
)
} , )
UpperCamelCase : int = field(
default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} )
UpperCamelCase : int = field(
default=-1 , metadata={
'''help''': (
'''Optional input sequence length after tokenization.'''
'''The training dataset will be truncated in block of this size for training.'''
'''Default to the model max input length for single sentence inputs (take into account special tokens).'''
)
} , )
UpperCamelCase : bool = field(
default=snake_case_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = None , ):
'''simple docstring'''
def _dataset(UpperCamelCase__ , UpperCamelCase__=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" )
return LineByLineWithRefDataset(
tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size , ref_path=UpperCamelCase__ , )
return LineByLineTextDataset(tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=UpperCamelCase__ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(UpperCamelCase__ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def lowerCAmelCase__ ( ):
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_a : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_a , _a , _a : List[str] = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , UpperCamelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
_a : str = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_a : Union[str, Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
_a : str = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.tokenizer_name:
_a : List[str] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_a : Union[str, Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
""" script, save it,and load it from here, using --tokenizer_name""" )
if model_args.model_name_or_path:
_a : Optional[Any] = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , )
else:
logger.info("""Training new model from scratch""" )
_a : List[Any] = AutoModelWithLMHead.from_config(UpperCamelCase__ )
model.resize_token_embeddings(len(UpperCamelCase__ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
if data_args.block_size <= 0:
_a : int = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
_a : Optional[Any] = min(data_args.block_size , tokenizer.max_len )
# Get datasets
_a : Optional[Any] = (
get_dataset(UpperCamelCase__ , tokenizer=UpperCamelCase__ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
_a : Optional[int] = (
get_dataset(UpperCamelCase__ , tokenizer=UpperCamelCase__ , evaluate=UpperCamelCase__ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
_a : Any = DataCollatorForPermutationLanguageModeling(
tokenizer=UpperCamelCase__ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
_a : Union[str, Any] = DataCollatorForWholeWordMask(
tokenizer=UpperCamelCase__ , mlm_probability=data_args.mlm_probability )
else:
_a : str = DataCollatorForLanguageModeling(
tokenizer=UpperCamelCase__ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_a : Union[str, Any] = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , data_collator=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , prediction_loss_only=UpperCamelCase__ , )
# Training
if training_args.do_train:
_a : Optional[Any] = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=UpperCamelCase__ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_a : Union[str, Any] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_a : int = trainer.evaluate()
_a : Dict = math.exp(eval_output["""eval_loss"""] )
_a : Union[str, Any] = {"""perplexity""": perplexity}
_a : Optional[Any] = os.path.join(training_args.output_dir , """eval_results_lm.txt""" )
if trainer.is_world_master():
with open(UpperCamelCase__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , UpperCamelCase__ , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(UpperCamelCase__ )
return results
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
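# A self-contained sketch of the block-size selection logic above: a
# non-positive request falls back to the tokenizer maximum, and any larger
# request is clamped to it.
def resolve_block_size(requested, tokenizer_max):
    return tokenizer_max if requested <= 0 else min(requested, tokenizer_max)


assert resolve_block_size(-1, 512) == 512
assert resolve_block_size(1024, 512) == 512
assert resolve_block_size(128, 512) == 128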
| 324
| 0
|
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : Tuple = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
_a : Optional[Any] = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(UpperCamelCase__ )
# Let's go
_a : int = parser.parse_args()
if not hasattr(UpperCamelCase__ , """func""" ):
parser.print_help()
exit(1 )
# Run
_a : Dict = args.func(UpperCamelCase__ )
service.run()
if __name__ == "__main__":
main()
| 368
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_snake_case = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
_a : Optional[Any] = k.replace(UpperCamelCase__ , UpperCamelCase__ )
return k
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Union[str, Any] = DEFAULTS.copy()
cfg_kwargs.update(UpperCamelCase__ )
_a : Optional[Any] = PegasusConfig(**UpperCamelCase__ )
_a : Tuple = PegasusForConditionalGeneration(UpperCamelCase__ )
_a : str = torch_model.model.state_dict()
_a : Union[str, Any] = {}
for k, v in tf_weights.items():
_a : Any = rename_state_dict_key(UpperCamelCase__ )
if new_k not in sd:
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
_a : str = v.T
_a : int = torch.tensor(UpperCamelCase__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
_a : Union[str, Any] = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
_a : str = mapping["""shared.weight"""]
_a : Union[str, Any] = mapping["""shared.weight"""]
_a : Optional[Any] = {k: torch.zeros_like(UpperCamelCase__ ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**UpperCamelCase__ )
_a , _a : int = torch_model.model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
_a : Optional[Any] = [
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], F"""no matches found for the following tf keys {extra}"""
return torch_model
def lowerCAmelCase__ ( UpperCamelCase__="./ckpt/aeslc/model.ckpt-32000" ):
'''simple docstring'''
_a : List[Any] = tf.train.list_variables(UpperCamelCase__ )
_a : Optional[int] = {}
_a : Dict = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(UpperCamelCase__ , desc="""converting tf checkpoint to dict""" ):
_a : Optional[Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
_a : str = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
_a : int = array
return tf_weights
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
# save tokenizer first
_a : Dict = Path(UpperCamelCase__ ).parent.name
_a : Optional[Any] = task_specific_params[F"""summarization_{dataset}"""]["""max_position_embeddings"""]
_a : Tuple = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=UpperCamelCase__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(UpperCamelCase__ )
# convert model
_a : List[Any] = get_tf_weights_as_numpy(UpperCamelCase__ )
_a : Dict = task_specific_params[F"""summarization_{dataset}"""]
if dataset == "large":
_a : Tuple = task_specific_params
_a : Optional[int] = convert_pegasus(UpperCamelCase__ , UpperCamelCase__ )
torch_model.save_pretrained(UpperCamelCase__ )
_a : Dict = torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(UpperCamelCase__ , Path(UpperCamelCase__ ) / """pytorch_model.bin""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
_snake_case = parser.parse_args()
if args.save_dir is None:
_snake_case = Path(args.tf_ckpt_path).parent.name
_snake_case = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
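# A self-contained sketch of the renaming pass above: applying the
# (pattern, replacement) entries in order maps a TF-style key to its
# Hugging Face equivalent.
sample_patterns = [["attention", "attn"], ["/", "."], ["kernel", "weight"]]
sample_key = "encoder/layer_0/attention/kernel"
for old, new in sample_patterns:
    sample_key = sample_key.replace(old, new)
assert sample_key == "encoder.layer_0.attn.weight"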
| 324
| 0
|